diff --git a/.gitignore b/.gitignore
index 48f1f223318..d84fc479f03 100644
--- a/.gitignore
+++ b/.gitignore
@@ -381,3 +381,5 @@
 /modules/suggest/*.iml
 /modules/suggest/pom.xml
 /modules/suggest/dist
+
+/modules/spatial/build/
diff --git a/build.xml b/build.xml
index c589347af64..850d81b1cfa 100644
--- a/build.xml
+++ b/build.xml
@@ -18,6 +18,12 @@
 -->
+[... six added lines; XML markup lost in extraction ...]
diff --git a/dev-tools/eclipse/dot.classpath b/dev-tools/eclipse/dot.classpath
index 06ea9870b47..77405969be1 100644
--- a/dev-tools/eclipse/dot.classpath
+++ b/dev-tools/eclipse/dot.classpath
@@ -169,5 +169,6 @@
+[... one added classpath entry; XML markup lost in extraction ...]
diff --git a/lucene/build.xml b/lucene/build.xml
index 2d3e512f87b..2b8fab0f4e7 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -74,9 +74,6 @@
-[... three removed lines; XML markup lost in extraction ...]
@@ -98,10 +95,22 @@
 [... XML markup lost in extraction ...]
+      Warning: Ignoring your multiplier and nightly settings for backwards tests.
+      These tests are for API compatibility only!
@@ -113,13 +122,6 @@
-      Warning: Ignoring your multiplier and nightly settings for backwards tests.
-      These tests are for API compatibility only!
 [... XML markup lost in extraction ...]
@@ -128,30 +130,17 @@
      Note: we disable multiplier/nightly because the purpose is to find API breaks -->
 [... XML markup lost in extraction ...]
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
[... index line lost in extraction ...]
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -86,9 +85,6 @@
-[... removed lines; XML markup lost in extraction ...]
@@ -171,13 +167,6 @@
-[... removed lines; XML markup lost in extraction ...]
@@ -263,7 +252,15 @@
 [... XML markup lost in extraction ...]
@@ -271,6 +268,7 @@
      only special cases need bundles -->
+      This build requires Ivy and Ivy could not be found in your ant classpath.
@@ -622,152 +620,332 @@
 [... test-runner targets rewritten; XML markup lost in extraction, surviving text below ...]
-      Tests failed!
+      5 slowest tests:
+      Showing ${max} slowest tests according to local stats. (change with -Dmax=...).
+      Showing ${max} slowest tests in cached stats. (change with -Dmax=...).
+
+#
+# Test case filtering. --------------------------------------------
+#
+# - 'tests.class' is a class-filtering shell-like glob pattern,
+#   'testcase' is an alias of "tests.class=*.${testcase}"
+# - 'tests.method' is a method-filtering glob pattern.
+#   'testmethod' is an alias of "tests.method=${testmethod}"
+#
+
+# Run a single test case (variants)
+ant test -Dtests.class=org.apache.lucene.package.ClassName
+ant test "-Dtests.class=*.ClassName"
+ant test -Dtestcase=ClassName
+
+# Run all tests in a package and sub-packages
+ant test "-Dtests.class=org.apache.lucene.package.*"
+
+# Run any test methods that contain 'esi' (like: ...r*esi*ze...).
+ant test "-Dtests.method=*esi*"
+
+#
+# Seed and repetitions. -------------------------------------------
+#
+
+# Run with a given seed (seed is a hex-encoded long).
+ant test -Dtests.seed=DEADBEEF
+
+# Repeats a given test N times (note filters).
+ant test -Dtests.iters=N -Dtestcase=ClassName -Dtestmethod=mytest
+
+# Repeats _all_ tests of ClassName N times. Every test repetition
+# will have a different seed.
+ant test -Dtests.iters=N -Dtestcase=ClassName
+
+# Repeats _all_ tests of ClassName N times. Every test repetition
+# will have exactly the same master (dead) and method-level (beef)
+# seed.
+ant test -Dtests.iters=N -Dtestcase=ClassName -Dtests.seed=dead:beef
+
+#
+# Load balancing and caches. --------------------------------------
+#
+
+# Run sequentially (one slave JVM).
+ant -Dtests.threadspercpu=1 test
+
+# Run with more slave JVMs than the default.
+# Don't count hypercores for CPU-intense tests.
+# Make sure there is enough RAM to handle child JVMs.
+ant -Dtests.threadspercpu=8 test
+
+# Use repeatable suite order on slave JVMs (disables job stealing).
+ant -Dtests.dynamicAssignmentRatio=0 test
+
+# Update global (versioned!) execution times cache (top level).
+ant clean test
+ant -f lucene/build.xml test-updatecache
+
+#
+# Miscellaneous. --------------------------------------------------
+#
+
+# Run all tests without stopping on errors (inspect log files!).
+ant -Dtests.haltonfailure=false test
+
+# Run more verbose output (slave JVM parameters, etc.).
+ant -verbose test
+
+# Display local averaged stats, if any (30 slowest tests).
+ant test-times -Dmax=30
+
+# Output test files and reports.
+${tests-output}/tests-report.txt    - full ASCII tests report
+${tests-output}/tests-failures.txt  - failures only (if any)
+${tests-output}/tests-timehints.txt - execution times (see above)
+${tests-output}/tests-report-*      - HTML5 report with results
+${tests-output}/junit4-*.suites     - per-JVM executed suites
+  (important if job stealing).
 [... remaining XML markup of this hunk lost in extraction ...]
      See http://issues.apache.org/jira/browse/LUCENE-721
   -->
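The Java hunks that follow all apply one mechanical pattern: the shared `random` field of `LuceneTestCase` is replaced with calls to a `random()` accessor, and random-dependent state moves out of field initializers (and out of static helpers) into `setUp()` or locals. A minimal sketch of the resulting idiom, assuming a `LuceneTestCase` subclass; the class and field names here are hypothetical:

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.util.LuceneTestCase;

public class ExampleTest extends LuceneTestCase {
  // Before this patch: initialized inline from the shared 'random' field.
  // Now the analyzer is built in setUp(), after the per-test randomness
  // context (and hence random()) is available.
  private Analyzer analyzer;

  @Override
  public void setUp() throws Exception {
    super.setUp();
    analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
  }
}
```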
diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
index e4cdc32c757..5c2b64f7928 100644
--- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
+++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
@@ -56,7 +56,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox jumped";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
       FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
@@ -98,7 +98,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox jumped";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
@@ -169,7 +169,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox did not jump";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
@@ -212,7 +212,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox did not jump";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
@@ -252,7 +252,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox did not jump";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
       FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
index d2fb5ecf114..a41e1c9847c 100644
--- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
+++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
@@ -70,7 +70,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   Directory ramDir;
   public IndexSearcher searcher = null;
   int numHighlights = 0;
-  final Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+  Analyzer analyzer;
   TopDocs hits;
 
   String[] texts = {
@@ -79,9 +79,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
       "JFK has been shot", "John Kennedy has been shot",
       "This text has a typo in referring to Keneddy",
       "wordx wordy wordz wordx wordy wordx worda wordb wordy wordc", "y z x y z a b", "lets is a the lets is a the lets is a the lets" };
-  
+
   public void testQueryScorerHits() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
     PhraseQuery phraseQuery = new PhraseQuery();
     phraseQuery.add(new Term(FIELD_NAME, "very"));
@@ -153,9 +153,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
    * This method intended for use with testHighlightingWithDefaultField()
    * @throws InvalidTokenOffsetsException
    */
-  private static String highlightField(Query query, String fieldName, String text)
+  private String highlightField(Query query, String fieldName, String text)
       throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)
+    TokenStream tokenStream = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)
         .tokenStream(fieldName, new StringReader(text));
     // Assuming "<B>", "</B>" used to highlight
     SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
@@ -234,7 +234,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 
     Highlighter h = new Highlighter(this, scorer);
 
-    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
 
     h.getBestFragment(analyzer, f1, content);
@@ -1166,7 +1166,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   public void testMaxSizeHighlight() throws Exception {
-    final MockAnalyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+    final MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     // we disable MockTokenizer checks because we will forcefully limit the
     // tokenstream and call end() before incrementToken() returns false.
     analyzer.setEnableChecks(false);
@@ -1201,7 +1201,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     CharacterRunAutomaton stopWords = new CharacterRunAutomaton(BasicAutomata.makeString("stoppedtoken"));
     // we disable MockTokenizer checks because we will forcefully limit the
     // tokenstream and call end() before incrementToken() returns false.
-    final MockAnalyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true);
+    final MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopWords, true);
     analyzer.setEnableChecks(false);
     TermQuery query = new TermQuery(new Term("data", goodWord));
@@ -1252,7 +1252,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
       Highlighter hg = getHighlighter(query, "text", fm);
       hg.setTextFragmenter(new NullFragmenter());
       hg.setMaxDocCharsToAnalyze(36);
-      String match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
+      String match = hg.getBestFragment(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
       assertTrue(
           "Matched text should contain remainder of text after highlighted query ",
           match.endsWith("in it"));
@@ -1269,7 +1269,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     numHighlights = 0;
     // test to show how rewritten query can still be used
     searcher = new IndexSearcher(reader);
-    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
 
     BooleanQuery query = new BooleanQuery();
     query.add(new WildcardQuery(new Term(FIELD_NAME, "jf?")), Occur.SHOULD);
@@ -1611,7 +1611,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   private Directory dir;
-  private Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+  private Analyzer a;
 
   public void testWeightedTermsWithDeletes() throws IOException, InvalidTokenOffsetsException {
     makeIndex();
@@ -1626,7 +1626,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   private void makeIndex() throws IOException {
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
     writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
     writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
@@ -1636,7 +1636,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   private void deleteDocument() throws IOException {
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
     writer.deleteDocuments( new Term( "t_text1", "del" ) );
     // To see negative idf, keep comment the following line
     //writer.forceMerge(1);
@@ -1728,10 +1728,13 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   @Override
   public void setUp() throws Exception {
     super.setUp();
+
+    a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
+    analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     dir = newDirectory();
     ramDir = newDirectory();
     IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
     for (String text : texts) {
       addDoc(writer, text);
     }
diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
index c76a8ff9e50..d0a54cfe20c 100644
--- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
+++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
@@ -88,9 +88,9 @@ public class HighlightCustomQueryTest extends LuceneTestCase {
    *
    * @throws InvalidTokenOffsetsException
    */
-  private static String highlightField(Query query, String fieldName,
+  private String highlightField(Query query, String fieldName,
       String text) throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE,
+    TokenStream tokenStream = new MockAnalyzer(random(), MockTokenizer.SIMPLE,
         true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName,
         new StringReader(text));
     // Assuming "<B>", "</B>" used to highlight
diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
index 4d880122a2a..e0f66e0ccf0 100644
--- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
+++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
@@ -84,9 +84,9 @@ public abstract class AbstractTestCase extends LuceneTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    analyzerW = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    analyzerW = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
     analyzerB = new BigramAnalyzer();
-    analyzerK = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
+    analyzerK = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false);
     dir = newDirectory();
   }
diff --git a/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java b/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
index 85a6c645f76..a0ceaca5c7c 100644
--- a/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
+++ b/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
@@ -108,14 +108,14 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     StringBuilder termField = new StringBuilder();
 
     // add up to 250 terms to field "foo"
-    final int numFooTerms = random.nextInt(250 * RANDOM_MULTIPLIER);
+    final int numFooTerms = random().nextInt(250 * RANDOM_MULTIPLIER);
     for (int i = 0; i < numFooTerms; i++) {
       fooField.append(" ");
       fooField.append(randomTerm());
     }
 
     // add up to 250 terms to field "term"
-    final int numTermTerms = random.nextInt(250 * RANDOM_MULTIPLIER);
+    final int numTermTerms = random().nextInt(250 * RANDOM_MULTIPLIER);
     for (int i = 0; i < numTermTerms; i++) {
       termField.append(" ");
       termField.append(randomTerm());
@@ -170,10 +170,10 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
    * Return a random analyzer (Simple, Stop, Standard) to analyze the terms.
    */
  private Analyzer randomAnalyzer() {
-    switch(random.nextInt(3)) {
-      case 0: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
-      case 1: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
-      default: return new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    switch(random().nextInt(3)) {
+      case 0: return new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+      case 1: return new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+      default: return new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
     }
   }
 
@@ -192,21 +192,21 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
    * the other half of the time, returns a random unicode string.
    */
   private String randomTerm() {
-    if (random.nextBoolean()) {
+    if (random().nextBoolean()) {
       // return a random TEST_TERM
-      return TEST_TERMS[random.nextInt(TEST_TERMS.length)];
+      return TEST_TERMS[random().nextInt(TEST_TERMS.length)];
     } else {
       // return a random unicode term
-      return _TestUtil.randomUnicodeString(random);
+      return _TestUtil.randomUnicodeString(random());
     }
   }
 
   public void testDocsEnumStart() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random);
+    Analyzer analyzer = new MockAnalyzer(random());
     MemoryIndex memory = new MemoryIndex();
     memory.addField("foo", "bar", analyzer);
     AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
-    DocsEnum disi = _TestUtil.docs(random, reader, "foo", new BytesRef("bar"), null, null, false);
+    DocsEnum disi = _TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, false);
     int docid = disi.docID();
     assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
     assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -222,7 +222,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
   }
 
   public void testDocsAndPositionsEnumStart() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random);
+    Analyzer analyzer = new MockAnalyzer(random());
     MemoryIndex memory = new MemoryIndex(true);
     memory.addField("foo", "bar", analyzer);
     AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
@@ -250,7 +250,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     SpanQuery wrappedquery = new SpanMultiTermQueryWrapper<RegexpQuery>(regex);
 
     MemoryIndex mindex = new MemoryIndex();
-    mindex.addField("field", new MockAnalyzer(random).tokenStream("field", new StringReader("hello there")));
+    mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", new StringReader("hello there")));
 
     // This throws an NPE
     assertEquals(0, mindex.search(wrappedquery), 0.00001f);
@@ -262,7 +262,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     SpanQuery wrappedquery = new SpanOrQuery(new SpanMultiTermQueryWrapper<RegexpQuery>(regex));
 
     MemoryIndex mindex = new MemoryIndex();
-    mindex.addField("field", new MockAnalyzer(random).tokenStream("field", new StringReader("hello there")));
+    mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", new StringReader("hello there")));
 
     // This passes though
     assertEquals(0, mindex.search(wrappedquery), 0.00001f);
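For readers unfamiliar with the test framework, the `MockAnalyzer` configurations recurring through these hunks approximate the classic analyzers; TestMockAnalyzer (further down in this patch) documents the correspondence. A quick reference, using only constructor calls that appear verbatim in this diff:

```java
// Approximates WhitespaceAnalyzer: whitespace tokenizer, no lowercasing.
Analyzer whitespace = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
// Approximates SimpleAnalyzer: letter tokenizer plus lowercasing.
Analyzer simple = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
// Approximates KeywordAnalyzer: the whole input becomes a single token.
Analyzer keyword = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false);
// Approximates StopAnalyzer: SimpleAnalyzer plus an English stopset.
Analyzer stop = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true,
                                 MockTokenFilter.ENGLISH_STOPSET, true);
```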
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
index 1d0002c6995..e6cc7cd9f98 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
@@ -39,7 +39,7 @@ public class TestIndexSplitter extends LuceneTestCase {
     mergePolicy.setNoCFSRatio(1);
     IndexWriter iw = new IndexWriter(
         fsDir,
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
             setOpenMode(OpenMode.CREATE).
             setMergePolicy(mergePolicy)
     );
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
index 54c58ad1824..a6837aca540 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
@@ -33,7 +33,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
     Document doc;
     for (int i = 0; i < NUM_DOCS; i++) {
       doc = new Document();
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
index b5fdfb4a982..ffdf51cbb0e 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
@@ -36,7 +36,7 @@ public class TestPKIndexSplitter extends LuceneTestCase {
     NumberFormat format = new DecimalFormat("000000000");
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+        TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
         .setOpenMode(OpenMode.CREATE).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
     for (int x = 0; x < 11; x++) {
       Document doc = createDocument(x, "1", 3, format);
@@ -56,7 +56,7 @@ public class TestPKIndexSplitter extends LuceneTestCase {
 
     // delete some documents
     w = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+        TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
         .setOpenMode(OpenMode.APPEND).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
     w.deleteDocuments(midTerm);
     w.deleteDocuments(new Term("id", format.format(2)));
@@ -71,8 +71,8 @@ public class TestPKIndexSplitter extends LuceneTestCase {
     Directory dir1 = newDirectory();
     Directory dir2 = newDirectory();
     PKIndexSplitter splitter = new PKIndexSplitter(dir, dir1, dir2, splitTerm,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)),
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())),
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
     splitter.split();
     IndexReader ir1 = IndexReader.open(dir1);
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
index c67bc3b4e86..21090b23a7c 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
@@ -17,6 +17,8 @@ package org.apache.lucene.misc;
  * limitations under the License.
  */
 
+import java.util.Random;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
@@ -39,8 +41,8 @@ public class TestHighFreqTerms extends LuceneTestCase {
   @BeforeClass
   public static void setUpClass() throws Exception {
     dir = newDirectory();
-    writer = new IndexWriter(dir, newIndexWriterConfig(random,
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+    writer = new IndexWriter(dir, newIndexWriterConfig(random(),
+        TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
         .setMaxBufferedDocs(2));
     indexDocs(writer);
     reader = IndexReader.open(dir);
@@ -194,7 +196,8 @@ public class TestHighFreqTerms extends LuceneTestCase {
   /********************Testing Utils**********************************/
 
   private static void indexDocs(IndexWriter writer) throws Exception {
-
+    Random rnd = random();
+
     /**
      * Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n*2 (squared).
      */
@@ -202,9 +205,9 @@ public class TestHighFreqTerms extends LuceneTestCase {
       Document doc = new Document();
       String content = getContent(i);
 
-      doc.add(newField(random, "FIELD_1", content, TextField.TYPE_STORED));
+      doc.add(newField(rnd, "FIELD_1", content, TextField.TYPE_STORED));
       //add a different field
-      doc.add(newField(random, "different_field", "diff", TextField.TYPE_STORED));
+      doc.add(newField(rnd, "different_field", "diff", TextField.TYPE_STORED));
       writer.addDocument(doc);
     }
 
@@ -212,7 +215,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
     //highest freq terms for a specific field.
     for (int i = 1; i <= 10; i++) {
       Document doc = new Document();
-      doc.add(newField(random, "different_field", "diff", TextField.TYPE_STORED));
+      doc.add(newField(rnd, "different_field", "diff", TextField.TYPE_STORED));
       writer.addDocument(doc);
     }
     // add some docs where tf < df so we can see if sorting works
@@ -223,7 +226,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
     for (int i = 0; i < highTF; i++) {
       content += "highTF ";
     }
-    doc.add(newField(random, "FIELD_1", content, TextField.TYPE_STORED));
+    doc.add(newField(rnd, "FIELD_1", content, TextField.TYPE_STORED));
     writer.addDocument(doc);
     // highTF medium df =5
     int medium_df = 5;
@@ -234,7 +237,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
       for (int j = 0; j < tf; j++) {
         newcontent += "highTFmedDF ";
       }
-      newdoc.add(newField(random, "FIELD_1", newcontent, TextField.TYPE_STORED));
+      newdoc.add(newField(rnd, "FIELD_1", newcontent, TextField.TYPE_STORED));
       writer.addDocument(newdoc);
     }
     // add a doc with high tf in field different_field
@@ -244,7 +247,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
     for (int i = 0; i < targetTF; i++) {
       content += "TF150 ";
     }
-    doc.add(newField(random, "different_field", content, TextField.TYPE_STORED));
+    doc.add(newField(rnd, "different_field", content, TextField.TYPE_STORED));
     writer.addDocument(doc);
     writer.close();
diff --git a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
index 715c4954681..cc7af8ddc78 100644
--- a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
+++ b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
@@ -45,7 +45,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
     //Add series of docs with filterable fields : url, text and dates flags
     addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
@@ -135,7 +135,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
     for (ScoreDoc hit : hits) {
       Document d = searcher.doc(hit.doc);
       String url = d.get(KEY_FIELD);
-      DocsEnum td = _TestUtil.docs(random, reader,
+      DocsEnum td = _TestUtil.docs(random(), reader,
                                    KEY_FIELD,
                                    new BytesRef(url),
                                    MultiFields.getLiveDocs(reader),
@@ -159,7 +159,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
     for (ScoreDoc hit : hits) {
       Document d = searcher.doc(hit.doc);
       String url = d.get(KEY_FIELD);
-      DocsEnum td = _TestUtil.docs(random, reader,
+      DocsEnum td = _TestUtil.docs(random(), reader,
                                    KEY_FIELD,
                                    new BytesRef(url),
                                    MultiFields.getLiveDocs(reader),
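The TestHighFreqTerms hunks above show the second recurring idiom in this patch: when a helper (here a static one) draws many random values, the per-test `Random` is fetched once and reused, instead of calling `random()` at every use. A minimal sketch under the same assumptions (the helper name and field contents below are made up for illustration):

```java
// Hypothetical helper, mirroring the pattern in TestHighFreqTerms.indexDocs:
// cache the per-test Random in a local and reuse it inside the loop.
private static void indexRandomDocs(IndexWriter writer) throws Exception {
  Random rnd = random(); // fetch once
  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    doc.add(newField(rnd, "FIELD_1", "content " + i, TextField.TYPE_STORED));
    writer.addDocument(doc);
  }
}
```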
diff --git a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java
index 823a9237f8b..492735049c9 100644
--- a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java
+++ b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java
@@ -38,13 +38,15 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
   private Directory directory;
   private IndexSearcher searcher;
   private IndexReader reader;
-  private Analyzer analyzer = new MockAnalyzer(random);
+  private Analyzer analyzer;
 
   @Override
   public void setUp() throws Exception {
     super.setUp();
+
+    analyzer = new MockAnalyzer(random());
     directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
     //Add series of docs with misspelt names
     addDoc(writer, "jonathon smythe", "1");
@@ -122,7 +124,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
   }
 
   public void testFuzzyLikeThisQueryEquals() {
-    Analyzer analyzer = new MockAnalyzer(random);
+    Analyzer analyzer = new MockAnalyzer(random());
     FuzzyLikeThisQuery fltq1 = new FuzzyLikeThisQuery(10, analyzer);
     fltq1.addTerms("javi", "subject", 0.5f, 2);
     FuzzyLikeThisQuery fltq2 = new FuzzyLikeThisQuery(10, analyzer);
diff --git a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
index b6577610555..b1dd1622201 100644
--- a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
+++ b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
@@ -47,22 +47,22 @@ public class TestSlowCollationMethods extends LuceneTestCase {
   @BeforeClass
   public static void beforeClass() throws Exception {
-    final Locale locale = LuceneTestCase.randomLocale(random);
+    final Locale locale = LuceneTestCase.randomLocale(random());
     collator = Collator.getInstance(locale);
     collator.setStrength(Collator.IDENTICAL);
     collator.setDecomposition(Collator.NO_DECOMPOSITION);
     numDocs = 1000 * RANDOM_MULTIPLIER;
     dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random, dir);
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
-      String value = _TestUtil.randomUnicodeString(random);
+      String value = _TestUtil.randomUnicodeString(random());
       Field field = newField("field", value, StringField.TYPE_STORED);
       doc.add(field);
       iw.addDocument(doc);
     }
-    splitDoc = _TestUtil.randomUnicodeString(random);
+    splitDoc = _TestUtil.randomUnicodeString(random());
     reader = iw.getReader();
     iw.close();
@@ -97,13 +97,13 @@ public class TestSlowCollationMethods extends LuceneTestCase {
     });
     final Sort sort = new Sort(sf);
 
-    final TopDocs docs1 = searcher.search(TermRangeQuery.newStringRange("field", null, splitDoc, true, true), null, numDocs/(1+random.nextInt(4)), sort);
+    final TopDocs docs1 = searcher.search(TermRangeQuery.newStringRange("field", null, splitDoc, true, true), null, numDocs/(1+random().nextInt(4)), sort);
     doCheckSorting(docs1);
 
-    final TopDocs docs2 = searcher.search(TermRangeQuery.newStringRange("field", splitDoc, null, true, true), null, numDocs/(1+random.nextInt(4)), sort);
+    final TopDocs docs2 = searcher.search(TermRangeQuery.newStringRange("field", splitDoc, null, true, true), null, numDocs/(1+random().nextInt(4)), sort);
     doCheckSorting(docs2);
 
-    final TopDocs docs = TopDocs.merge(sort, numDocs/(1+random.nextInt(4)), new TopDocs[]{docs1, docs2});
+    final TopDocs docs = TopDocs.merge(sort, numDocs/(1+random().nextInt(4)), new TopDocs[]{docs1, docs2});
     doCheckSorting(docs);
   }
 
@@ -130,8 +130,8 @@ public class TestSlowCollationMethods extends LuceneTestCase {
   public void testRangeQuery() throws Exception {
     int numQueries = 50*RANDOM_MULTIPLIER;
     for (int i = 0; i < numQueries; i++) {
-      String startPoint = _TestUtil.randomUnicodeString(random);
-      String endPoint = _TestUtil.randomUnicodeString(random);
+      String startPoint = _TestUtil.randomUnicodeString(random());
+      String endPoint = _TestUtil.randomUnicodeString(random());
       Query query = new SlowCollatedTermRangeQuery("field", startPoint, endPoint, true, true, collator);
       doTestRanges(startPoint, endPoint, query);
     }
   }
 
@@ -140,8 +140,8 @@ public class TestSlowCollationMethods extends LuceneTestCase {
   public void testRangeFilter() throws Exception {
     int numQueries = 50*RANDOM_MULTIPLIER;
     for (int i = 0; i < numQueries; i++) {
-      String startPoint = _TestUtil.randomUnicodeString(random);
-      String endPoint = _TestUtil.randomUnicodeString(random);
+      String startPoint = _TestUtil.randomUnicodeString(random());
+      String endPoint = _TestUtil.randomUnicodeString(random());
       Query query = new ConstantScoreQuery(new SlowCollatedTermRangeFilter("field", startPoint, endPoint, true, true, collator));
       doTestRanges(startPoint, endPoint, query);
     }
diff --git a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java
index d72d516951f..802caed7bad 100644
--- a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java
+++ b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java
@@ -45,7 +45,7 @@ public class TestRegexQuery extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
     Document doc = new Document();
     doc.add(newField(FN, "the quick brown fox jumps over the lazy dog", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
diff --git a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
index 63d46971880..058b98eebbf 100644
--- a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
+++ b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
@@ -58,7 +58,7 @@ public class TestSpanRegexQuery extends LuceneTestCase {
   public void testSpanRegex() throws Exception {
     Directory directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(random())));
     Document doc = new Document();
     // doc.add(newField("field", "the quick brown fox jumps over the lazy dog",
     // Field.Store.NO, Field.Index.ANALYZED));
diff --git a/lucene/core/src/test/org/apache/lucene/TestDemo.java b/lucene/core/src/test/org/apache/lucene/TestDemo.java
index 666533eda02..c23df94a206 100644
--- a/lucene/core/src/test/org/apache/lucene/TestDemo.java
+++ b/lucene/core/src/test/org/apache/lucene/TestDemo.java
@@ -39,13 +39,13 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestDemo extends LuceneTestCase {
 
   public void testDemo() throws IOException {
-    Analyzer analyzer = new MockAnalyzer(random);
+    Analyzer analyzer = new MockAnalyzer(random());
 
     // Store the index in memory:
     Directory directory = newDirectory();
     // To store an index on disk, use this instead:
     //Directory directory = FSDirectory.open("/tmp/testindex");
-    RandomIndexWriter iwriter = new RandomIndexWriter(random, directory, analyzer);
+    RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, analyzer);
     Document doc = new Document();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
diff --git a/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java
index 80c2e825b6f..6f1d6beeb41 100644
--- a/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java
@@ -63,7 +63,7 @@ public class TestExternalCodecs extends LuceneTestCase {
     dir.setCheckIndexOnClose(false); // we use a custom codec provider
     IndexWriter w = new IndexWriter(
         dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
             setCodec(new CustomPerFieldCodec()).
             setMergePolicy(newLogMergePolicy(3))
     );
diff --git a/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
index 8f0418f968b..512567482dd 100644
--- a/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
+++ b/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
@@ -95,7 +95,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
     doc.add(idField);
 
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new MyMergeScheduler())
+        TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergeScheduler(new MyMergeScheduler())
         .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setMergePolicy(newLogMergePolicy()));
     LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearch.java b/lucene/core/src/test/org/apache/lucene/TestSearch.java
index 5ec455c4a61..fdae73b621e 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearch.java
@@ -46,7 +46,7 @@ public class TestSearch extends LuceneTestCase {
     public void testSearch() throws Exception {
       StringWriter sw = new StringWriter();
       PrintWriter pw = new PrintWriter(sw, true);
-      doTestSearch(random, pw, false);
+      doTestSearch(random(), pw, false);
       pw.close();
       sw.close();
       String multiFileOutput = sw.getBuffer().toString();
@@ -54,7 +54,7 @@ public class TestSearch extends LuceneTestCase {
 
       sw = new StringWriter();
       pw = new PrintWriter(sw, true);
-      doTestSearch(random, pw, true);
+      doTestSearch(random(), pw, true);
       pw.close();
       sw.close();
       String singleFileOutput = sw.getBuffer().toString();
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 23d92bd59c5..6f78f84602d 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -50,7 +50,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
     StringWriter sw = new StringWriter();
     PrintWriter pw = new PrintWriter(sw, true);
     final int MAX_DOCS = atLeast(225);
-    doTest(random, pw, false, MAX_DOCS);
+    doTest(random(), pw, false, MAX_DOCS);
     pw.close();
     sw.close();
     String multiFileOutput = sw.getBuffer().toString();
@@ -58,7 +58,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
 
     sw = new StringWriter();
     pw = new PrintWriter(sw, true);
-    doTest(random, pw, true, MAX_DOCS);
+    doTest(random(), pw, true, MAX_DOCS);
     pw.close();
     sw.close();
     String singleFileOutput = sw.getBuffer().toString();
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
index f19eaf57cdc..73a1d8b3b71 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -37,7 +37,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
   public void testCaching() throws IOException {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
     TokenStream stream = new TokenStream() {
       private int index = 0;
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
index 566c3e3e295..605bd289874 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.Reader;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -165,12 +166,12 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          final TokenStream t2 = new MockGraphTokenFilter(random, t);
+          final TokenStream t2 = new MockGraphTokenFilter(random(), t);
           return new TokenStreamComponents(t, t2);
         }
       };
 
-      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
+      checkAnalysisConsistency(random(), a, false, "a b c d e f g h i j k");
     }
   }
 
@@ -187,12 +188,12 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new GraphTokenizer(reader);
-          final TokenStream t2 = new MockGraphTokenFilter(random, t);
+          final TokenStream t2 = new MockGraphTokenFilter(random(), t);
           return new TokenStreamComponents(t, t2);
         }
       };
 
-      checkAnalysisConsistency(random, a, false, "a/x:3 c/y:2 d e f/z:4 g h i j k");
+      checkAnalysisConsistency(random(), a, false, "a/x:3 c/y:2 d e f/z:4 g h i j k");
     }
   }
@@ -249,12 +250,13 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          final TokenStream t2 = new MockGraphTokenFilter(random, t);
+          final TokenStream t2 = new MockGraphTokenFilter(random(), t);
           final TokenStream t3 = new RemoveATokens(t2);
           return new TokenStreamComponents(t, t3);
         }
       };
 
+      Random random = random();
       checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
       checkAnalysisConsistency(random, a, false, "x y a b c d e f g h i j k");
       checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a");
@@ -276,11 +278,12 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
           final TokenStream t2 = new RemoveATokens(t);
-          final TokenStream t3 = new MockGraphTokenFilter(random, t2);
+          final TokenStream t3 = new MockGraphTokenFilter(random(), t2);
           return new TokenStreamComponents(t, t3);
         }
       };
 
+      Random random = random();
       checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
       checkAnalysisConsistency(random, a, false, "x y a b c d e f g h i j k");
       checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a");
@@ -301,11 +304,12 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          final TokenStream t2 = new MockGraphTokenFilter(random, t);
+          final TokenStream t2 = new MockGraphTokenFilter(random(), t);
           return new TokenStreamComponents(t, t2);
         }
       };
 
+      Random random = random();
       checkRandomData(random, a, 5, atLeast(1000));
     }
   }
@@ -324,12 +328,13 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          final TokenStream t1 = new MockGraphTokenFilter(random, t);
-          final TokenStream t2 = new MockGraphTokenFilter(random, t1);
+          final TokenStream t1 = new MockGraphTokenFilter(random(), t);
+          final TokenStream t2 = new MockGraphTokenFilter(random(), t1);
           return new TokenStreamComponents(t, t2);
         }
       };
 
+      Random random = random();
       checkRandomData(random, a, 5, atLeast(1000));
     }
   }
@@ -347,12 +352,13 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          final TokenStream t1 = new MockGraphTokenFilter(random, t);
-          final TokenStream t2 = new MockHoleInjectingTokenFilter(random, t1);
+          final TokenStream t1 = new MockGraphTokenFilter(random(), t);
+          final TokenStream t2 = new MockHoleInjectingTokenFilter(random(), t1);
           return new TokenStreamComponents(t, t2);
         }
       };
 
+      Random random = random();
       checkRandomData(random, a, 5, atLeast(1000));
     }
   }
@@ -370,12 +376,13 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           final Tokenizer t = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          final TokenStream t1 = new MockHoleInjectingTokenFilter(random, t);
-          final TokenStream t2 = new MockGraphTokenFilter(random, t1);
+          final TokenStream t1 = new MockHoleInjectingTokenFilter(random(), t);
+          final TokenStream t2 = new MockGraphTokenFilter(random(), t1);
           return new TokenStreamComponents(t, t2);
         }
       };
 
+      Random random = random();
       checkRandomData(random, a, 5, atLeast(1000));
     }
   }
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java
index 960e04aea6e..35c7a567bcc 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestLookaheadTokenFilter.java
@@ -19,6 +19,7 @@ package org.apache.lucene.analysis;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.util.Random;
 
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 
@@ -28,12 +29,13 @@ public class TestLookaheadTokenFilter extends BaseTokenStreamTestCase {
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Random random = random();
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, random.nextBoolean());
         TokenStream output = new MockRandomLookaheadTokenFilter(random, tokenizer);
         return new TokenStreamComponents(tokenizer, output);
       }
     };
-    checkRandomData(random, a, 200*RANDOM_MULTIPLIER, 8192);
+    checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 8192);
   }
 
   private static class NeverPeeksLookaheadTokenFilter extends LookaheadTokenFilter {
@@ -56,11 +58,11 @@ public class TestLookaheadTokenFilter extends BaseTokenStreamTestCase {
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, random.nextBoolean());
+        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, random().nextBoolean());
         TokenStream output = new NeverPeeksLookaheadTokenFilter(tokenizer);
         return new TokenStreamComponents(tokenizer, output);
       }
     };
-    checkRandomData(random, a, 200*RANDOM_MULTIPLIER, 8192);
+    checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 8192);
   }
 }
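The hunks above show both styles of the migration side by side: a one-off draw calls `random()` inline, while a run of calls that should share one `Random` captures it in a local first. A hedged sketch of the contrast (the analyzer `a` stands in for the anonymous analyzers above):

```java
// One-off draw: call the accessor inline.
boolean lowercase = random().nextBoolean();

// Repeated use in one scope: capture the per-test Random once and
// reuse it, as the TestGraphTokenizers hunks above do.
Random random = random();
checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
checkAnalysisConsistency(random, a, false, "x y a b c d e f g h i j k");
```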
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
index 0c52f254187..89045cef451 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -31,7 +31,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
 
   /** Test a configuration that behaves a lot like WhitespaceAnalyzer */
   public void testWhitespace() throws Exception {
-    Analyzer a = new MockAnalyzer(random);
+    Analyzer a = new MockAnalyzer(random());
     assertAnalyzesTo(a, "A bc defg hiJklmn opqrstuv wxy z ",
         new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
     assertAnalyzesToReuse(a, "aba cadaba shazam",
@@ -42,7 +42,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
 
   /** Test a configuration that behaves a lot like SimpleAnalyzer */
   public void testSimple() throws Exception {
-    Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+    Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
     assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
         new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
     assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
@@ -53,7 +53,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
 
   /** Test a configuration that behaves a lot like KeywordAnalyzer */
   public void testKeyword() throws Exception {
-    Analyzer a = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
+    Analyzer a = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false);
     assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
         new String[] { "a-bc123 defg+hijklmn567opqrstuv78wxy_z " });
     assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
@@ -64,13 +64,13 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
 
   /** Test a configuration that behaves a lot like StopAnalyzer */
   public void testStop() throws Exception {
-    Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+    Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     assertAnalyzesTo(a, "the quick brown a fox",
         new String[] { "quick", "brown", "fox" },
         new int[] { 2, 1, 2 });
 
     // disable positions
-    a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false);
+    a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false);
     assertAnalyzesTo(a, "the quick brown a fox",
         new String[] { "quick", "brown", "fox" },
         new int[] { 1, 1, 1 });
@@ -83,7 +83,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
         BasicOperations.complement(
             Automaton.union(
                 Arrays.asList(BasicAutomata.makeString("foo"), BasicAutomata.makeString("bar")))));
-    Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, keepWords, true);
+    Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, keepWords, true);
     assertAnalyzesTo(a, "quick foo brown bar bar fox foo",
         new String[] { "foo", "bar", "bar", "foo" },
         new int[] { 2, 2, 1, 2 });
@@ -92,7 +92,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
   /** Test a configuration that behaves a lot like LengthFilter */
   public void testLength() throws Exception {
     CharacterRunAutomaton length5 = new CharacterRunAutomaton(new RegExp(".{5,}").toAutomaton());
-    Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, length5, true);
+    Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, length5, true);
     assertAnalyzesTo(a, "ok toolong fine notfine",
         new String[] { "ok", "fine" },
         new int[] { 1, 2 });
@@ -101,7 +101,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
   public void testLUCENE_3042() throws Exception {
     String testString = "t";
 
-    Analyzer analyzer = new MockAnalyzer(random);
+    Analyzer analyzer = new MockAnalyzer(random());
     TokenStream stream = analyzer.tokenStream("dummy", new StringReader(testString));
     stream.reset();
     while (stream.incrementToken()) {
@@ -115,16 +115,16 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new MockAnalyzer(random), atLeast(1000));
+    checkRandomData(random(), new MockAnalyzer(random()), atLeast(1000));
   }
 
   public void testForwardOffsets() throws Exception {
     int num = atLeast(10000);
     for (int i = 0; i < num; i++) {
-      String s = _TestUtil.randomHtmlishString(random, 20);
+      String s = _TestUtil.randomHtmlishString(random(), 20);
       StringReader reader = new StringReader(s);
       MockCharFilter charfilter = new MockCharFilter(CharReader.get(reader), 2);
-      MockAnalyzer analyzer = new MockAnalyzer(random);
+      MockAnalyzer analyzer = new MockAnalyzer(random());
       TokenStream ts = analyzer.tokenStream("bogus", charfilter);
       ts.reset();
       while (ts.incrementToken()) {
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java b/lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java
index 569830ff9b8..c6ad9e7b207 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java
@@ -108,8 +108,8 @@ public class TestAppendingCodec extends LuceneTestCase {
   private static final String text = "the quick brown fox jumped over the lazy dog";
 
   public void testCodec() throws Exception {
-    Directory dir = new AppendingRAMDirectory(random, new RAMDirectory());
-    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random));
+    Directory dir = new AppendingRAMDirectory(random(), new RAMDirectory());
+    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random()));
 
     cfg.setCodec(new AppendingCodec());
     ((TieredMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false);
@@ -150,8 +150,8 @@ public class TestAppendingCodec extends LuceneTestCase {
   }
 
   public void testCompoundFile() throws Exception {
-    Directory dir = new AppendingRAMDirectory(random, new RAMDirectory());
-    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random));
+    Directory dir = new AppendingRAMDirectory(random(), new RAMDirectory());
+    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random()));
     TieredMergePolicy mp = new TieredMergePolicy();
     mp.setUseCompoundFile(true);
     mp.setNoCFSRatio(1.0);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java b/lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java
index b85f7fd7ecb..0054a96d073 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java
@@ -29,13 +29,13 @@ public class TestIntBlockCodec extends LuceneTestCase {
 
     IntStreamFactory f = new MockFixedIntBlockPostingsFormat(128).getIntFactory();
 
-    IntIndexOutput out = f.createOutput(dir, "test", newIOContext(random));
+    IntIndexOutput out = f.createOutput(dir, "test", newIOContext(random()));
     for(int i=0;i<11777;i++) {
       out.write(i);
     }
     out.close();
 
-    IntIndexInput in = f.openInput(dir, "test", newIOContext(random));
+    IntIndexInput in = f.openInput(dir, "test", newIOContext(random()));
     IntIndexInput.Reader r = in.reader();
 
     for(int i=0;i<11777;i++) {
@@ -50,12 +50,12 @@ public class TestIntBlockCodec extends LuceneTestCase {
     Directory dir = newDirectory();
 
     IntStreamFactory f = new MockFixedIntBlockPostingsFormat(128).getIntFactory();
-    IntIndexOutput out = f.createOutput(dir, "test", newIOContext(random));
+    IntIndexOutput out = f.createOutput(dir, "test", newIOContext(random()));
 
     // write no ints
     out.close();
 
-    IntIndexInput in = f.openInput(dir, "test", newIOContext(random));
+    IntIndexInput in = f.openInput(dir, "test", newIOContext(random()));
     in.reader();
     // read no ints
     in.close();
b/lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java @@ -287,12 +287,12 @@ public class TestSurrogates extends LuceneTestCase { @Test public void testSurrogatesOrder() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, + RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig( TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setCodec(new PreFlexRWCodec())); + new MockAnalyzer(random())).setCodec(new PreFlexRWCodec())); - final int numField = _TestUtil.nextInt(random, 2, 5); + final int numField = _TestUtil.nextInt(random(), 2, 5); int uniqueTermCount = 0; @@ -307,7 +307,7 @@ public class TestSurrogates extends LuceneTestCase { final Set uniqueTerms = new HashSet(); for(int i=0;i=count1; i--) { - BitVector bv2 = new BitVector(d, "TESTBV", newIOContext(random)); + BitVector bv2 = new BitVector(d, "TESTBV", newIOContext(random())); assertTrue(doCompare(bv,bv2)); bv = bv2; bv.set(i); assertEquals(i,size-bv.count()); - bv.write(d, "TESTBV", newIOContext(random)); + bv.write(d, "TESTBV", newIOContext(random())); } } @@ -224,11 +224,11 @@ public class TestBitVector extends LuceneTestCase final int numBits = 10240; BitVector bv = new BitVector(numBits); bv.invertAll(); - int numToClear = random.nextInt(5); + int numToClear = random().nextInt(5); for(int i=0;i enums = new IdentityHashMap(); MatchNoBits bits = new Bits.MatchNoBits(r.maxDoc()); while ((iterator.next()) != null) { - DocsEnum docs = iterator.docs(random.nextBoolean() ? bits : new Bits.MatchNoBits(r.maxDoc()), null, random.nextBoolean()); + DocsEnum docs = iterator.docs(random().nextBoolean() ? bits : new Bits.MatchNoBits(r.maxDoc()), null, random().nextBoolean()); enums.put(docs, true); } @@ -72,10 +72,10 @@ public class TestReuseDocsEnum extends LuceneTestCase { public void testReuseDocsEnumSameBitsOrNull() throws IOException { Directory dir = newDirectory(); Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); int numdocs = atLeast(20); - createRandomIndex(numdocs, writer, random); + createRandomIndex(numdocs, writer, random()); writer.commit(); DirectoryReader open = DirectoryReader.open(dir); @@ -87,7 +87,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { MatchNoBits bits = new Bits.MatchNoBits(open.maxDoc()); DocsEnum docs = null; while ((iterator.next()) != null) { - docs = iterator.docs(bits, docs, random.nextBoolean()); + docs = iterator.docs(bits, docs, random().nextBoolean()); enums.put(docs, true); } @@ -96,7 +96,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { iterator = terms.iterator(null); docs = null; while ((iterator.next()) != null) { - docs = iterator.docs(new Bits.MatchNoBits(open.maxDoc()), docs, random.nextBoolean()); + docs = iterator.docs(new Bits.MatchNoBits(open.maxDoc()), docs, random().nextBoolean()); enums.put(docs, true); } assertEquals(terms.size(), enums.size()); @@ -105,7 +105,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { iterator = terms.iterator(null); docs = null; while ((iterator.next()) != null) { - docs = iterator.docs(null, docs, random.nextBoolean()); + docs = iterator.docs(null, docs, random().nextBoolean()); enums.put(docs, true); } 
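// Editor's note: the recurring edit in this patch -- the static `random` field
// becoming a `random()` call -- is what gives every test thread its own Random
// under the randomized runner. A minimal sketch of that idea, assuming a single
// master seed (an illustration only, not LuceneTestCase's actual implementation):

import java.util.Random;

class PerThreadRandomSketch {
  private static final long MASTER_SEED = 0xDEADBEEFL;

  // Each thread lazily derives its own Random from the master seed, so threads
  // never contend on (or perturb) a shared instance.
  private static final ThreadLocal<Random> PER_THREAD = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random(MASTER_SEED ^ Thread.currentThread().getId());
    }
  };

  public static Random random() {
    return PER_THREAD.get();
  }
}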
assertEquals(1, enums.size()); @@ -117,10 +117,10 @@ public class TestReuseDocsEnum extends LuceneTestCase { public void testReuseDocsEnumDifferentReader() throws IOException { Directory dir = newDirectory(); Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); int numdocs = atLeast(20); - createRandomIndex(numdocs, writer, random); + createRandomIndex(numdocs, writer, random()); writer.commit(); DirectoryReader firstReader = DirectoryReader.open(dir); @@ -137,7 +137,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { DocsEnum docs = null; BytesRef term = null; while ((term = iterator.next()) != null) { - docs = iterator.docs(null, randomDocsEnum("body", term, sequentialSubReaders2, bits), random.nextBoolean()); + docs = iterator.docs(null, randomDocsEnum("body", term, sequentialSubReaders2, bits), random().nextBoolean()); enums.put(docs, true); } assertEquals(terms.size(), enums.size()); @@ -146,7 +146,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { enums.clear(); docs = null; while ((term = iterator.next()) != null) { - docs = iterator.docs(bits, randomDocsEnum("body", term, sequentialSubReaders2, bits), random.nextBoolean()); + docs = iterator.docs(bits, randomDocsEnum("body", term, sequentialSubReaders2, bits), random().nextBoolean()); enums.put(docs, true); } assertEquals(terms.size(), enums.size()); @@ -155,11 +155,11 @@ public class TestReuseDocsEnum extends LuceneTestCase { } public DocsEnum randomDocsEnum(String field, BytesRef term, IndexReader[] readers, Bits bits) throws IOException { - if (random.nextInt(10) == 0) { + if (random().nextInt(10) == 0) { return null; } - AtomicReader indexReader = (AtomicReader) readers[random.nextInt(readers.length)]; - return indexReader.termDocsEnum(bits, field, term, random.nextBoolean()); + AtomicReader indexReader = (AtomicReader) readers[random().nextInt(readers.length)]; + return indexReader.termDocsEnum(bits, field, term, random().nextBoolean()); } /** diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java index e6cff5ba2f8..3a9b47a5894 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java @@ -20,6 +20,7 @@ package org.apache.lucene.codecs.lucene40.values; import java.io.IOException; import java.io.Reader; import java.util.Comparator; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; @@ -69,18 +70,18 @@ public class TestDocValues extends LuceneTestCase { Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(random), - random.nextBoolean()); + DocValuesConsumer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(random()), + random().nextBoolean()); int maxDoc = 220; final String[] values = new String[maxDoc]; final int fixedLength = 1 + atLeast(50); for (int i = 0; i < 100; i++) { final String s; - if (i > 0 && random.nextInt(5) <= 2) 
{ + if (i > 0 && random().nextInt(5) <= 2) { // use prior value - s = values[2 * random.nextInt(i)]; + s = values[2 * random().nextInt(i)]; } else { - s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)); + s = _TestUtil.randomFixedByteLengthUnicodeString(random(), fixedSize? fixedLength : 1 + random().nextInt(39)); } values[2 * i] = s; @@ -91,7 +92,7 @@ public class TestDocValues extends LuceneTestCase { w.finish(maxDoc); assertEquals(0, trackBytes.get()); - DocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, COMP, newIOContext(random)); + DocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, COMP, newIOContext(random())); // Verify we can load source twice: for (int iter = 0; iter < 2; iter++) { @@ -123,6 +124,7 @@ public class TestDocValues extends LuceneTestCase { // Lookup random strings: if (mode == Bytes.Mode.SORTED) { final int valueCount = ss.getValueCount(); + Random random = random(); for (int i = 0; i < 1000; i++) { BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39))); int ord = ss.getOrdByValue(bytesValue, new BytesRef()); @@ -178,14 +180,14 @@ public class TestDocValues extends LuceneTestCase { for (int i = 0; i < minMax.length; i++) { Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.VAR_INTS, newIOContext(random)); + DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.VAR_INTS, newIOContext(random())); valueHolder.numberValue = minMax[i][0]; w.add(0, valueHolder); valueHolder.numberValue = minMax[i][1]; w.add(1, valueHolder); w.finish(2); assertEquals(0, trackBytes.get()); - DocValues r = Ints.getValues(dir, "test", 2, Type.VAR_INTS, newIOContext(random)); + DocValues r = Ints.getValues(dir, "test", 2, Type.VAR_INTS, newIOContext(random())); Source source = getSource(r); assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1], expectedTypes[i], source.getType()); @@ -214,13 +216,13 @@ public class TestDocValues extends LuceneTestCase { byte[] sourceArray = new byte[] {1,2,3}; Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_8, newIOContext(random)); + DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_8, newIOContext(random())); for (int i = 0; i < sourceArray.length; i++) { valueHolder.numberValue = (long) sourceArray[i]; w.add(i, valueHolder); } w.finish(sourceArray.length); - DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_8, newIOContext(random)); + DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_8, newIOContext(random())); Source source = r.getSource(); assertTrue(source.hasArray()); byte[] loaded = ((byte[])source.getArray()); @@ -237,13 +239,13 @@ public class TestDocValues extends LuceneTestCase { short[] sourceArray = new short[] {1,2,3}; Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_16, newIOContext(random)); + DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_16, newIOContext(random())); for (int i = 0; i < sourceArray.length; i++) { valueHolder.numberValue = (long) sourceArray[i]; w.add(i, valueHolder); } 
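// Editor's note: the write-then-read checks in these DocValues hunks stay
// meaningful because every random draw comes from one seeded source, so a
// failing run can be replayed exactly. A self-contained sketch of that property
// (class and variable names are invented for illustration):

import java.util.Random;

class SeededRoundTripSketch {
  public static void main(String[] args) {
    long seed = args.length > 0 ? Long.parseLong(args[0]) : 42L;

    Random random = new Random(seed);
    int count = 333 + random.nextInt(333);   // same shape as NUM_VALUES above
    long[] written = new long[count];
    for (int i = 0; i < count; i++) {
      written[i] = random.nextLong();
    }

    // A second Random with the same seed replays the identical stream, as long
    // as the calls are consumed in the same order.
    Random replay = new Random(seed);
    int replayCount = 333 + replay.nextInt(333);
    if (replayCount != count) {
      throw new AssertionError("count diverged for seed " + seed);
    }
    for (int i = 0; i < count; i++) {
      if (written[i] != replay.nextLong()) {
        throw new AssertionError("value " + i + " diverged for seed " + seed);
      }
    }
    System.out.println("replayed " + count + " values for seed " + seed);
  }
}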
w.finish(sourceArray.length); - DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_16, newIOContext(random)); + DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_16, newIOContext(random())); Source source = r.getSource(); assertTrue(source.hasArray()); short[] loaded = ((short[])source.getArray()); @@ -260,13 +262,13 @@ public class TestDocValues extends LuceneTestCase { long[] sourceArray = new long[] {1,2,3}; Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_64, newIOContext(random)); + DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_64, newIOContext(random())); for (int i = 0; i < sourceArray.length; i++) { valueHolder.numberValue = sourceArray[i]; w.add(i, valueHolder); } w.finish(sourceArray.length); - DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_64, newIOContext(random)); + DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_64, newIOContext(random())); Source source = r.getSource(); assertTrue(source.hasArray()); long[] loaded = ((long[])source.getArray()); @@ -283,13 +285,13 @@ public class TestDocValues extends LuceneTestCase { int[] sourceArray = new int[] {1,2,3}; Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_32, newIOContext(random)); + DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_32, newIOContext(random())); for (int i = 0; i < sourceArray.length; i++) { valueHolder.numberValue = (long) sourceArray[i]; w.add(i, valueHolder); } w.finish(sourceArray.length); - DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_32, newIOContext(random)); + DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_32, newIOContext(random())); Source source = r.getSource(); assertTrue(source.hasArray()); int[] loaded = ((int[])source.getArray()); @@ -306,13 +308,13 @@ public class TestDocValues extends LuceneTestCase { float[] sourceArray = new float[] {1,2,3}; Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_32); + DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random()), Type.FLOAT_32); for (int i = 0; i < sourceArray.length; i++) { valueHolder.numberValue = sourceArray[i]; w.add(i, valueHolder); } w.finish(sourceArray.length); - DocValues r = Floats.getValues(dir, "test", 3, newIOContext(random), Type.FLOAT_32); + DocValues r = Floats.getValues(dir, "test", 3, newIOContext(random()), Type.FLOAT_32); Source source = r.getSource(); assertTrue(source.hasArray()); float[] loaded = ((float[])source.getArray()); @@ -329,13 +331,13 @@ public class TestDocValues extends LuceneTestCase { double[] sourceArray = new double[] {1,2,3}; Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_64); + DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random()), Type.FLOAT_64); for (int i = 0; i < sourceArray.length; i++) { valueHolder.numberValue = sourceArray[i]; w.add(i, valueHolder); } w.finish(sourceArray.length); - DocValues r = Floats.getValues(dir, "test", 
3, newIOContext(random), Type.FLOAT_64); + DocValues r = Floats.getValues(dir, "test", 3, newIOContext(random()), Type.FLOAT_64); Source source = r.getSource(); assertTrue(source.hasArray()); double[] loaded = ((double[])source.getArray()); @@ -350,22 +352,22 @@ public class TestDocValues extends LuceneTestCase { private void testInts(Type type, int maxBit) throws IOException { DocValueHolder valueHolder = new DocValueHolder(); long maxV = 1; - final int NUM_VALUES = 333 + random.nextInt(333); + final int NUM_VALUES = 333 + random().nextInt(333); final long[] values = new long[NUM_VALUES]; for (int rx = 1; rx < maxBit; rx++, maxV *= 2) { Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, type, newIOContext(random)); + DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, type, newIOContext(random())); for (int i = 0; i < NUM_VALUES; i++) { - final long v = random.nextLong() % (1 + maxV); + final long v = random().nextLong() % (1 + maxV); valueHolder.numberValue = values[i] = v; w.add(i, valueHolder); } - final int additionalDocs = 1 + random.nextInt(9); + final int additionalDocs = 1 + random().nextInt(9); w.finish(NUM_VALUES + additionalDocs); assertEquals(0, trackBytes.get()); - DocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs, type, newIOContext(random)); + DocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs, type, newIOContext(random())); for (int iter = 0; iter < 2; iter++) { Source s = getSource(r); assertEquals(type, s.getType()); @@ -388,20 +390,20 @@ public class TestDocValues extends LuceneTestCase { DocValueHolder valueHolder = new DocValueHolder(); Directory dir = newDirectory(); final Counter trackBytes = Counter.newCounter(); - DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), type); - final int NUM_VALUES = 777 + random.nextInt(777); + DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random()), type); + final int NUM_VALUES = 777 + random().nextInt(777); final double[] values = new double[NUM_VALUES]; for (int i = 0; i < NUM_VALUES; i++) { - final double v = type == Type.FLOAT_32 ? random.nextFloat() : random + final double v = type == Type.FLOAT_32 ? 
random().nextFloat() : random() .nextDouble(); valueHolder.numberValue = values[i] = v; w.add(i, valueHolder); } - final int additionalValues = 1 + random.nextInt(10); + final int additionalValues = 1 + random().nextInt(10); w.finish(NUM_VALUES + additionalValues); assertEquals(0, trackBytes.get()); - DocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random), type); + DocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random()), type); for (int iter = 0; iter < 2; iter++) { Source s = getSource(r); for (int i = 0; i < NUM_VALUES; i++) { @@ -419,7 +421,7 @@ public class TestDocValues extends LuceneTestCase { private Source getSource(DocValues values) throws IOException { // getSource uses cache internally - switch(random.nextInt(5)) { + switch(random().nextInt(5)) { case 3: return values.load(); case 2: diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java index 1b8e1d1713c..76c7fff7d77 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java @@ -96,7 +96,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase { public void testMergeUnusedPerFieldCodec() throws IOException { Directory dir = newDirectory(); IndexWriterConfig iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec()); + new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec()); IndexWriter writer = newWriter(dir, iwconf); addDocs(writer, 10); writer.commit(); @@ -123,7 +123,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase { System.out.println("TEST: make new index"); } IndexWriterConfig iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec()); + new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec()); iwconf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10); IndexWriter writer = newWriter(dir, iwconf); @@ -142,7 +142,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase { assertQuery(new Term("content", "aaa"), dir, 10); Lucene40Codec codec = (Lucene40Codec)iwconf.getCodec(); - iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND).setCodec(codec); //((LogMergePolicy) iwconf.getMergePolicy()).setUseCompoundFile(false); //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10); @@ -234,28 +234,28 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase { */ @Test public void testStressPerFieldCodec() throws IOException { - Directory dir = newDirectory(random); + Directory dir = newDirectory(random()); final int docsPerRound = 97; int numRounds = atLeast(1); for (int i = 0; i < numRounds; i++) { - int num = _TestUtil.nextInt(random, 30, 60); - IndexWriterConfig config = newIndexWriterConfig(random, - TEST_VERSION_CURRENT, new MockAnalyzer(random)); + int num = _TestUtil.nextInt(random(), 30, 60); + IndexWriterConfig config = newIndexWriterConfig(random(), + TEST_VERSION_CURRENT, new MockAnalyzer(random())); 
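// Editor's note: the stress hunk that follows randomizes per-field options
// (setTokenized / setOmitNorms) with fresh random().nextBoolean() calls. A tiny
// stand-alone analogue of that knob-randomizing pattern (all names invented):

import java.util.Random;

class RandomOptionsSketch {
  static final class FieldOptions {
    final boolean tokenized;
    final boolean omitNorms;

    FieldOptions(Random r) {
      // One draw per knob, in a fixed order, keeps the stream reproducible.
      tokenized = r.nextBoolean();
      omitNorms = r.nextBoolean();
    }

    @Override
    public String toString() {
      return "tokenized=" + tokenized + ", omitNorms=" + omitNorms;
    }
  }

  public static void main(String[] args) {
    Random random = new Random(7);   // fixed seed: identical options every run
    for (int k = 0; k < 3; k++) {
      System.out.println("field" + k + ": " + new FieldOptions(random));
    }
  }
}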
config.setOpenMode(OpenMode.CREATE_OR_APPEND); IndexWriter writer = newWriter(dir, config); for (int j = 0; j < docsPerRound; j++) { final Document doc = new Document(); for (int k = 0; k < num; k++) { FieldType customType = new FieldType(TextField.TYPE_UNSTORED); - customType.setTokenized(random.nextBoolean()); - customType.setOmitNorms(random.nextBoolean()); + customType.setTokenized(random().nextBoolean()); + customType.setOmitNorms(random().nextBoolean()); Field field = newField("" + k, _TestUtil - .randomRealisticUnicodeString(random, 128), customType); + .randomRealisticUnicodeString(random(), 128), customType); doc.add(field); } writer.addDocument(doc); } - if (random.nextBoolean()) { + if (random().nextBoolean()) { writer.forceMerge(1); } writer.commit(); diff --git a/lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java b/lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java index 9f94aed68cf..c48201cdee2 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java @@ -56,13 +56,13 @@ public class Test10KPulsings extends LuceneTestCase { File f = _TestUtil.getTempDir("10kpulsed"); MockDirectoryWrapper dir = newFSDirectory(f); dir.setCheckIndexOnClose(false); // we do this ourselves explicitly - RandomIndexWriter iw = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); Document document = new Document(); FieldType ft = new FieldType(TextField.TYPE_STORED); - switch(_TestUtil.nextInt(random, 0, 2)) { + switch(_TestUtil.nextInt(random(), 0, 2)) { case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break; case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break; default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break; @@ -87,7 +87,7 @@ public class Test10KPulsings extends LuceneTestCase { for (int i = 0; i < 10050; i++) { String expected = df.format(i); assertEquals(expected, te.next().utf8ToString()); - de = _TestUtil.docs(random, te, null, de, false); + de = _TestUtil.docs(random(), te, null, de, false); assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc()); } @@ -101,19 +101,19 @@ public class Test10KPulsings extends LuceneTestCase { */ public void test10kNotPulsed() throws Exception { // we always run this test with pulsing codec. 
- int freqCutoff = _TestUtil.nextInt(random, 1, 10); + int freqCutoff = _TestUtil.nextInt(random(), 1, 10); Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing40PostingsFormat(freqCutoff)); File f = _TestUtil.getTempDir("10knotpulsed"); MockDirectoryWrapper dir = newFSDirectory(f); dir.setCheckIndexOnClose(false); // we do this ourselves explicitly - RandomIndexWriter iw = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); Document document = new Document(); FieldType ft = new FieldType(TextField.TYPE_STORED); - switch(_TestUtil.nextInt(random, 0, 2)) { + switch(_TestUtil.nextInt(random(), 0, 2)) { case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break; case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break; default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break; @@ -145,7 +145,7 @@ public class Test10KPulsings extends LuceneTestCase { for (int i = 0; i < 10050; i++) { String expected = df.format(i); assertEquals(expected, te.next().utf8ToString()); - de = _TestUtil.docs(random, te, null, de, false); + de = _TestUtil.docs(random(), te, null, de, false); assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc()); } diff --git a/lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java b/lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java index f47667f1c38..5c2e92ff6db 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java @@ -48,8 +48,8 @@ public class TestPulsingReuse extends LuceneTestCase { // we always run this test with pulsing codec. 
Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing40PostingsFormat(1)); Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); Document doc = new Document(); doc.add(new Field("foo", "a b b c c c d e f g g h i i j j k", TextField.TYPE_UNSTORED)); iw.addDocument(doc); @@ -87,8 +87,8 @@ public class TestPulsingReuse extends LuceneTestCase { Codec cp = _TestUtil.alwaysPostingsFormat(new NestedPulsingPostingsFormat()); MockDirectoryWrapper dir = newDirectory(); dir.setCheckIndexOnClose(false); // will do this ourselves, custom codec - RandomIndexWriter iw = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); Document doc = new Document(); doc.add(new Field("foo", "a b b c c c d e f g g g h i i j j k l l m m m", TextField.TYPE_UNSTORED)); // note: the reuse is imperfect, here we would have 4 enums (lost reuse when we get an enum for 'm') diff --git a/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java b/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java index 7e8b29cf0c5..7b2a671de4a 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java @@ -51,7 +51,7 @@ public class TestBinaryDocument extends LuceneTestCase { /** add the doc to a ram index */ Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(doc); /** open a reader and fetch the document */ @@ -85,7 +85,7 @@ public class TestBinaryDocument extends LuceneTestCase { /** add the doc to a ram index */ Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(doc); /** open a reader and fetch the document */ diff --git a/lucene/core/src/test/org/apache/lucene/document/TestDocument.java b/lucene/core/src/test/org/apache/lucene/document/TestDocument.java index e23a94439f4..2e54a6aa3d1 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestDocument.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestDocument.java @@ -171,7 +171,7 @@ public class TestDocument extends LuceneTestCase { */ public void testGetValuesForIndexedDocument() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(makeDocumentWithFields()); IndexReader reader = writer.getReader(); @@ -256,7 +256,7 @@ public class TestDocument extends LuceneTestCase { doc.add(new Field("keyword", "test", StringField.TYPE_STORED)); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(doc); field.setStringValue("id2"); writer.addDocument(doc); @@ -299,7 +299,7 @@ public class TestDocument extends LuceneTestCase { // 
LUCENE-3682 public void testTransitionAPI() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(new Field("stored", "abc", Field.Store.YES, Field.Index.NO)); @@ -363,7 +363,7 @@ public class TestDocument extends LuceneTestCase { public void testBoost() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); iwc.setMergePolicy(newLogMergePolicy()); IndexWriter iw = new IndexWriter(dir, iwc); Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java index 78787859e0b..38babbd8e25 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java @@ -46,7 +46,7 @@ public class Test2BPostings extends LuceneTestCase { dir.setCheckIndexOnClose(false); // don't double-checkindex IndexWriter w = new IndexWriter(dir, - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH) .setRAMBufferSizeMB(256.0) .setMergeScheduler(new ConcurrentMergeScheduler()) diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java index 692252a228d..5f0b5401996 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java @@ -149,7 +149,7 @@ public class Test2BTerms extends LuceneTestCase { System.out.println("Starting Test2B"); final long TERM_COUNT = ((long) Integer.MAX_VALUE) + 100000000; - final int TERMS_PER_DOC = _TestUtil.nextInt(random, 100000, 1000000); + final int TERMS_PER_DOC = _TestUtil.nextInt(random(), 100000, 1000000); List savedTerms = null; @@ -161,7 +161,7 @@ public class Test2BTerms extends LuceneTestCase { if (true) { IndexWriter w = new IndexWriter(dir, - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH) .setRAMBufferSizeMB(256.0) .setMergeScheduler(new ConcurrentMergeScheduler()) @@ -175,7 +175,7 @@ public class Test2BTerms extends LuceneTestCase { } Document doc = new Document(); - final MyTokenStream ts = new MyTokenStream(random, TERMS_PER_DOC); + final MyTokenStream ts = new MyTokenStream(random(), TERMS_PER_DOC); FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setIndexOptions(IndexOptions.DOCS_ONLY); @@ -227,13 +227,13 @@ public class Test2BTerms extends LuceneTestCase { System.out.println("TEST: findTerms"); final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null); final List savedTerms = new ArrayList(); - int nextSave = _TestUtil.nextInt(random, 500000, 1000000); + int nextSave = _TestUtil.nextInt(random(), 500000, 1000000); BytesRef term; while((term = termsEnum.next()) != null) { if (--nextSave == 0) { savedTerms.add(BytesRef.deepCopyOf(term)); System.out.println("TEST: add " + term); - nextSave = _TestUtil.nextInt(random, 500000, 1000000); + nextSave = 
_TestUtil.nextInt(random(), 500000, 1000000); } } return savedTerms; @@ -246,7 +246,7 @@ public class Test2BTerms extends LuceneTestCase { TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null); boolean failed = false; for(int iter=0;iter<10*terms.size();iter++) { - final BytesRef term = terms.get(random.nextInt(terms.size())); + final BytesRef term = terms.get(random().nextInt(terms.size())); System.out.println("TEST: search " + term); final long t0 = System.currentTimeMillis(); final int count = s.search(new TermQuery(new Term("field", term)), 1).totalHits; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java index c6bb4a08b48..6284669de12 100755 --- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -70,7 +70,7 @@ public class TestAddIndexes extends LuceneTestCase { IndexWriter writer = null; writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)) + new MockAnalyzer(random())) .setOpenMode(OpenMode.CREATE)); // add 100 documents addDocs(writer, 100); @@ -80,7 +80,7 @@ public class TestAddIndexes extends LuceneTestCase { writer = newWriter( aux, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.CREATE). setMergePolicy(newLogMergePolicy(false)) ); @@ -89,14 +89,14 @@ public class TestAddIndexes extends LuceneTestCase { assertEquals(40, writer.maxDoc()); writer.close(); - writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); // add 50 documents in compound files addDocs2(writer, 50); assertEquals(50, writer.maxDoc()); writer.close(); // test doc count before segments are merged - writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); assertEquals(100, writer.maxDoc()); writer.addIndexes(aux, aux2); assertEquals(190, writer.maxDoc()); @@ -111,14 +111,14 @@ public class TestAddIndexes extends LuceneTestCase { // now add another set in. Directory aux3 = newDirectory(); - writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); // add 40 documents addDocs(writer, 40); assertEquals(40, writer.maxDoc()); writer.close(); // test doc count before segments are merged - writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); assertEquals(190, writer.maxDoc()); writer.addIndexes(aux3); assertEquals(230, writer.maxDoc()); @@ -132,7 +132,7 @@ public class TestAddIndexes extends LuceneTestCase { verifyTermDocs(dir, new Term("content", "bbb"), 50); // now fully merge it. 
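// Editor's note: the threaded tests further down (RunAddIndexesThreads, the
// TestDirectoryReaderReopen reader tasks) fetch the Random inside run(), i.e. on
// the executing thread, rather than capturing it at construction time. A sketch
// of why, reusing the hypothetical PerThreadRandomSketch introduced above:

import java.util.Random;

class FetchOnThreadSketch {
  public static void main(String[] args) throws InterruptedException {
    Runnable task = new Runnable() {
      @Override
      public void run() {
        // Resolve the per-thread Random on the thread that actually runs the
        // work; capturing it on the constructing thread would hand this worker
        // another thread's seeded stream.
        Random rnd = PerThreadRandomSketch.random();
        System.out.println(Thread.currentThread().getName() + " drew " + rnd.nextInt(100));
      }
    };
    Thread a = new Thread(task, "reader-a");
    Thread b = new Thread(task, "reader-b");
    a.start();
    b.start();
    a.join();
    b.join();
  }
}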
- writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); writer.close(); @@ -145,11 +145,11 @@ public class TestAddIndexes extends LuceneTestCase { // now add a single document Directory aux4 = newDirectory(); - writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); addDocs2(writer, 1); writer.close(); - writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); assertEquals(230, writer.maxDoc()); writer.addIndexes(aux4); assertEquals(231, writer.maxDoc()); @@ -172,7 +172,7 @@ public class TestAddIndexes extends LuceneTestCase { Directory aux = newDirectory(); setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); writer.addIndexes(aux); // Adds 10 docs, then replaces them with another 10 @@ -208,7 +208,7 @@ public class TestAddIndexes extends LuceneTestCase { Directory aux = newDirectory(); setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); // Adds 10 docs, then replaces them with another 10 // docs, so 10 pending deletes: @@ -246,7 +246,7 @@ public class TestAddIndexes extends LuceneTestCase { Directory aux = newDirectory(); setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); // Adds 10 docs, then replaces them with another 10 // docs, so 10 pending deletes: @@ -286,7 +286,7 @@ public class TestAddIndexes extends LuceneTestCase { IndexWriter writer = null; - writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); // add 100 documents addDocs(writer, 100); assertEquals(100, writer.maxDoc()); @@ -294,7 +294,7 @@ public class TestAddIndexes extends LuceneTestCase { writer = newWriter( aux, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.CREATE). setMaxBufferedDocs(1000). setMergePolicy(newLogMergePolicy(false)) @@ -304,7 +304,7 @@ public class TestAddIndexes extends LuceneTestCase { writer.close(); writer = newWriter( aux, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.CREATE). setMaxBufferedDocs(1000). 
setMergePolicy(newLogMergePolicy(false)) @@ -312,7 +312,7 @@ public class TestAddIndexes extends LuceneTestCase { addDocs(writer, 100); writer.close(); - writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); try { // cannot add self writer.addIndexes(aux, dir); @@ -342,7 +342,7 @@ public class TestAddIndexes extends LuceneTestCase { IndexWriter writer = newWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.APPEND). setMaxBufferedDocs(10). setMergePolicy(newLogMergePolicy(4)) @@ -371,7 +371,7 @@ public class TestAddIndexes extends LuceneTestCase { IndexWriter writer = newWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.APPEND). setMaxBufferedDocs(9). setMergePolicy(newLogMergePolicy(4)) @@ -400,13 +400,13 @@ public class TestAddIndexes extends LuceneTestCase { IndexWriter writer = newWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.APPEND). setMaxBufferedDocs(10). setMergePolicy(newLogMergePolicy(4)) ); - writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux, newIOContext(random)))); + writer.addIndexes(aux, new MockDirectoryWrapper(random(), new RAMDirectory(aux, newIOContext(random())))); assertEquals(1060, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -426,7 +426,7 @@ public class TestAddIndexes extends LuceneTestCase { setUpDirs(dir, aux, true); - IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMergePolicy(NoMergePolicy.COMPOUND_FILES); IndexWriter writer = new IndexWriter(aux, dontMergeConfig); for (int i = 0; i < 20; i++) { @@ -439,7 +439,7 @@ public class TestAddIndexes extends LuceneTestCase { writer = newWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.APPEND). setMaxBufferedDocs(4). setMergePolicy(newLogMergePolicy(4)) @@ -448,7 +448,7 @@ public class TestAddIndexes extends LuceneTestCase { if (VERBOSE) { System.out.println("\nTEST: now addIndexes"); } - writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux, newIOContext(random)))); + writer.addIndexes(aux, new MockDirectoryWrapper(random(), new RAMDirectory(aux, newIOContext(random())))); assertEquals(1020, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -468,7 +468,7 @@ public class TestAddIndexes extends LuceneTestCase { IndexWriter writer = newWriter( aux2, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.CREATE). setMaxBufferedDocs(100). 
setMergePolicy(newLogMergePolicy(10)) @@ -478,7 +478,7 @@ public class TestAddIndexes extends LuceneTestCase { assertEquals(3, writer.getSegmentCount()); writer.close(); - IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMergePolicy(NoMergePolicy.COMPOUND_FILES); writer = new IndexWriter(aux, dontMergeConfig); for (int i = 0; i < 27; i++) { @@ -489,7 +489,7 @@ public class TestAddIndexes extends LuceneTestCase { assertEquals(3, reader.numDocs()); reader.close(); - dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMergePolicy(NoMergePolicy.COMPOUND_FILES); writer = new IndexWriter(aux2, dontMergeConfig); for (int i = 0; i < 8; i++) { @@ -502,7 +502,7 @@ public class TestAddIndexes extends LuceneTestCase { writer = newWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.APPEND). setMaxBufferedDocs(6). setMergePolicy(newLogMergePolicy(4)) @@ -550,7 +550,7 @@ public class TestAddIndexes extends LuceneTestCase { private void verifyTermDocs(Directory dir, Term term, int numDocs) throws IOException { IndexReader reader = IndexReader.open(dir); - DocsEnum docsEnum = _TestUtil.docs(random, reader, term.field, term.bytes, null, null, false); + DocsEnum docsEnum = _TestUtil.docs(random(), reader, term.field, term.bytes, null, null, false); int count = 0; while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) count++; @@ -565,7 +565,7 @@ public class TestAddIndexes extends LuceneTestCase { private void setUpDirs(Directory dir, Directory aux, boolean withID) throws IOException { IndexWriter writer = null; - writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000)); + writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000)); // add 1000 documents in 1 segment if (withID) { addDocsWithID(writer, 1000, 0); @@ -578,7 +578,7 @@ public class TestAddIndexes extends LuceneTestCase { writer = newWriter( aux, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.CREATE). setMaxBufferedDocs(1000). setMergePolicy(newLogMergePolicy(false, 10)) @@ -593,7 +593,7 @@ public class TestAddIndexes extends LuceneTestCase { writer.close(); writer = newWriter( aux, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.APPEND). setMaxBufferedDocs(1000). 
setMergePolicy(newLogMergePolicy(false, 10)) @@ -612,7 +612,7 @@ public class TestAddIndexes extends LuceneTestCase { lmp.setUseCompoundFile(false); lmp.setMergeFactor(100); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)) + TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(5).setMergePolicy(lmp)); Document doc = new Document(); @@ -641,7 +641,7 @@ public class TestAddIndexes extends LuceneTestCase { lmp.setUseCompoundFile(false); lmp.setMergeFactor(4); writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)) + new MockAnalyzer(random())) .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp)); writer.addIndexes(dir); writer.close(); @@ -672,16 +672,16 @@ public class TestAddIndexes extends LuceneTestCase { public RunAddIndexesThreads(int numCopy) throws Throwable { NUM_COPY = numCopy; - dir = new MockDirectoryWrapper(random, new RAMDirectory()); + dir = new MockDirectoryWrapper(random(), new RAMDirectory()); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)) + TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(2)); for (int i = 0; i < NUM_INIT_DOCS; i++) addDoc(writer); writer.close(); dir2 = newDirectory(); - writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer2.commit(); @@ -700,7 +700,7 @@ public class TestAddIndexes extends LuceneTestCase { final Directory[] dirs = new Directory[NUM_COPY]; for(int k=0;k= 1) { - final int inc = 1+LuceneTestCase.random.nextInt(left-1); + if (random().nextInt(3) == 1 && left >= 1) { + final int inc = 1+random().nextInt(left-1); upto2 += inc; - if (LuceneTestCase.random.nextInt(2) == 1) { + if (random().nextInt(2) == 1) { doc = docs.advance(term.docs[upto2]); assertEquals(term.docs[upto2], doc); } else { @@ -597,7 +597,7 @@ public class TestCodecs extends LuceneTestCase { assertEquals(term.docs[upto2], doc); if (!field.omitTF) { assertEquals(term.positions[upto2].length, postings.freq()); - if (LuceneTestCase.random.nextInt(2) == 1) { + if (random().nextInt(2) == 1) { this.verifyPositions(term.positions[upto2], postings); } } @@ -616,9 +616,9 @@ public class TestCodecs extends LuceneTestCase { private void write(final FieldInfos fieldInfos, final Directory dir, final FieldData[] fields, boolean allowPreFlex) throws Throwable { - final int termIndexInterval = _TestUtil.nextInt(random, 13, 27); + final int termIndexInterval = _TestUtil.nextInt(random(), 13, 27); final Codec codec = Codec.getDefault(); - final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codec, null, newIOContext(random)); + final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codec, null, newIOContext(random())); final FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(state); Arrays.sort(fields); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java b/lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java index 56f03875b18..44eb4916e48 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java @@ -54,7 +54,7 
@@ public class TestCompoundFile extends LuceneTestCase private void createRandomFile(Directory dir, String name, int size) throws IOException { - IndexOutput os = dir.createOutput(name, newIOContext(random)); + IndexOutput os = dir.createOutput(name, newIOContext(random())); for (int i=0; i readers = Collections.synchronizedList(new ArrayList()); DirectoryReader firstReader = DirectoryReader.open(dir); DirectoryReader reader = firstReader; - final Random rnd = random; ReaderThread[] threads = new ReaderThread[n]; final Set readersToClose = Collections.synchronizedSet(new HashSet()); @@ -273,6 +268,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { @Override public void run() throws Exception { + Random rnd = LuceneTestCase.random(); while (!stopped) { if (index % 2 == 0) { // refresh reader synchronized @@ -301,7 +297,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { } } synchronized(this) { - wait(_TestUtil.nextInt(random, 1, 100)); + wait(_TestUtil.nextInt(random(), 1, 100)); } } } @@ -311,6 +307,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { task = new ReaderThreadTask() { @Override public void run() throws Exception { + Random rnd = LuceneTestCase.random(); while (!stopped) { int numReaders = readers.size(); if (numReaders > 0) { @@ -319,7 +316,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { } synchronized(this) { - wait(_TestUtil.nextInt(random, 1, 100)); + wait(_TestUtil.nextInt(random(), 1, 100)); } } } @@ -507,20 +504,20 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: modify index"); } - IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.deleteDocuments(new Term("field2", "a11")); w.deleteDocuments(new Term("field2", "b30")); w.close(); break; } case 1: { - IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.forceMerge(1); w.close(); break; } case 2: { - IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.addDocument(createDocument(101, 4)); w.forceMerge(1); w.addDocument(createDocument(102, 4)); @@ -529,7 +526,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { break; } case 3: { - IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.addDocument(createDocument(101, 4)); w.close(); break; @@ -587,7 +584,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setIndexDeletionPolicy(new KeepAllCommits()). setMaxBufferedDocs(-1). 
setMergePolicy(newLogMergePolicy(10)) diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java index b84bcad6962..65fcc042b2f 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java @@ -110,7 +110,7 @@ public class TestDoc extends LuceneTestCase { Directory directory = newFSDirectory(indexDir, null); IndexWriter writer = new IndexWriter( directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.CREATE). setMaxBufferedDocs(-1). setMergePolicy(newLogMergePolicy(10)) @@ -145,7 +145,7 @@ public class TestDoc extends LuceneTestCase { directory = newFSDirectory(indexDir, null); writer = new IndexWriter( directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setOpenMode(OpenMode.CREATE). setMaxBufferedDocs(-1). setMergePolicy(newLogMergePolicy(10)) @@ -189,7 +189,7 @@ public class TestDoc extends LuceneTestCase { private SegmentInfo merge(Directory dir, SegmentInfo si1, SegmentInfo si2, String merged, boolean useCompoundFile) throws Exception { - IOContext context = newIOContext(random); + IOContext context = newIOContext(random()); SegmentReader r1 = new SegmentReader(si1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context); SegmentReader r2 = new SegmentReader(si2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context); @@ -206,7 +206,7 @@ public class TestDoc extends LuceneTestCase { false, codec, fieldInfos); if (useCompoundFile) { - Collection filesToDelete = IndexWriter.createCompoundFile(dir, merged + ".cfs", MergeState.CheckAbort.NONE, info, newIOContext(random)); + Collection filesToDelete = IndexWriter.createCompoundFile(dir, merged + ".cfs", MergeState.CheckAbort.NONE, info, newIOContext(random())); info.setUseCompoundFile(true); for (final String fileToDelete : filesToDelete) si1.dir.deleteFile(fileToDelete); @@ -218,7 +218,7 @@ public class TestDoc extends LuceneTestCase { private void printSegment(PrintWriter out, SegmentInfo si) throws Exception { - SegmentReader reader = new SegmentReader(si, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(si, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); for (int i = 0; i < reader.numDocs(); i++) out.println(reader.document(i)); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java index 9609b42d7c9..f81189c49b2 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java @@ -34,7 +34,7 @@ public class TestDocCount extends LuceneTestCase { assumeFalse("PreFlex codec does not support docCount statistic!", "Lucene3x".equals(Codec.getDefault().getName())); Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir); int numDocs = atLeast(100); for (int i = 0; i < numDocs; i++) { iw.addDocument(doc()); @@ -52,9 +52,9 @@ public class TestDocCount extends LuceneTestCase { private Document doc() { Document doc = new Document(); - int numFields = _TestUtil.nextInt(random, 1, 10); + int numFields = _TestUtil.nextInt(random(), 1, 10); for 
(int i = 0; i < numFields; i++) { - doc.add(newField("" + _TestUtil.nextInt(random, 'a', 'z'), "" + _TestUtil.nextInt(random, 'a', 'z'), StringField.TYPE_UNSTORED)); + doc.add(newField("" + _TestUtil.nextInt(random(), 'a', 'z'), "" + _TestUtil.nextInt(random(), 'a', 'z'), StringField.TYPE_UNSTORED)); } return doc; } @@ -75,7 +75,7 @@ public class TestDocCount extends LuceneTestCase { FixedBitSet visited = new FixedBitSet(ir.maxDoc()); TermsEnum te = terms.iterator(null); while (te.next() != null) { - DocsEnum de = _TestUtil.docs(random, te, null, null, false); + DocsEnum de = _TestUtil.docs(random(), te, null, null, false); while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { visited.set(de.docID()); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java b/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java index b2e14c82cbb..a4739e66755 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java @@ -50,7 +50,7 @@ public class TestDocTermOrds extends LuceneTestCase { public void testSimple() throws Exception { Directory dir = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); Field field = newField("field", "", TextField.TYPE_UNSTORED); doc.add(field); @@ -96,7 +96,7 @@ public class TestDocTermOrds extends LuceneTestCase { final int NUM_TERMS = atLeast(20); final Set terms = new HashSet(); while(terms.size() < NUM_TERMS) { - final String s = _TestUtil.randomRealisticUnicodeString(random); + final String s = _TestUtil.randomRealisticUnicodeString(random()); //final String s = _TestUtil.randomSimpleString(random); if (s.length() > 0) { terms.add(new BytesRef(s)); @@ -107,16 +107,16 @@ public class TestDocTermOrds extends LuceneTestCase { final int NUM_DOCS = atLeast(100); - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); // Sometimes swap in codec that impls ord(): - if (random.nextInt(10) == 7) { + if (random().nextInt(10) == 7) { // Make sure terms index has ords: Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene40WithOrds")); conf.setCodec(codec); } - final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf); final int[][] idToOrds = new int[NUM_DOCS][]; final Set ordsForDocSet = new HashSet(); @@ -126,9 +126,9 @@ public class TestDocTermOrds extends LuceneTestCase { doc.add(new IntField("id", id)); - final int termCount = _TestUtil.nextInt(random, 0, 20*RANDOM_MULTIPLIER); + final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER); while(ordsForDocSet.size() < termCount) { - ordsForDocSet.add(random.nextInt(termsArray.length)); + ordsForDocSet.add(random().nextInt(termsArray.length)); } final int[] ordsForDoc = new int[termCount]; int upto = 0; @@ -181,12 +181,12 @@ public class TestDocTermOrds extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); final Set prefixes = new HashSet(); - final int numPrefix = _TestUtil.nextInt(random, 2, 
7); + final int numPrefix = _TestUtil.nextInt(random(), 2, 7); if (VERBOSE) { System.out.println("TEST: use " + numPrefix + " prefixes"); } while(prefixes.size() < numPrefix) { - prefixes.add(_TestUtil.randomRealisticUnicodeString(random)); + prefixes.add(_TestUtil.randomRealisticUnicodeString(random())); //prefixes.add(_TestUtil.randomSimpleString(random)); } final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]); @@ -194,7 +194,7 @@ public class TestDocTermOrds extends LuceneTestCase { final int NUM_TERMS = atLeast(20); final Set terms = new HashSet(); while(terms.size() < NUM_TERMS) { - final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random); + final String s = prefixesArray[random().nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random()); //final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomSimpleString(random); if (s.length() > 0) { terms.add(new BytesRef(s)); @@ -205,15 +205,15 @@ public class TestDocTermOrds extends LuceneTestCase { final int NUM_DOCS = atLeast(100); - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); // Sometimes swap in codec that impls ord(): - if (random.nextInt(10) == 7) { + if (random().nextInt(10) == 7) { Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene40WithOrds")); conf.setCodec(codec); } - final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf); final int[][] idToOrds = new int[NUM_DOCS][]; final Set ordsForDocSet = new HashSet(); @@ -223,9 +223,9 @@ public class TestDocTermOrds extends LuceneTestCase { doc.add(new IntField("id", id)); - final int termCount = _TestUtil.nextInt(random, 0, 20*RANDOM_MULTIPLIER); + final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER); while(ordsForDocSet.size() < termCount) { - ordsForDocSet.add(random.nextInt(termsArray.length)); + ordsForDocSet.add(random().nextInt(termsArray.length)); } final int[] ordsForDoc = new int[termCount]; int upto = 0; @@ -302,7 +302,7 @@ public class TestDocTermOrds extends LuceneTestCase { "field", prefixRef, Integer.MAX_VALUE, - _TestUtil.nextInt(random, 2, 10)); + _TestUtil.nextInt(random(), 2, 10)); final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id", false); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java index 35284fb4443..807943f8d0d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java @@ -18,16 +18,8 @@ package org.apache.lucene.index; */ import java.io.Closeable; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; +import java.util.*; import java.util.Map.Entry; -import java.util.Map; -import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; @@ -109,30 +102,30 @@ public class TestDocValuesIndexing extends LuceneTestCase { } public void testIndexBytesNoDeletes() throws IOException { -
runTestIndexBytes(writerConfig(random.nextBoolean()), false); + runTestIndexBytes(writerConfig(random().nextBoolean()), false); } public void testIndexBytesDeletes() throws IOException { - runTestIndexBytes(writerConfig(random.nextBoolean()), true); + runTestIndexBytes(writerConfig(random().nextBoolean()), true); } public void testIndexNumericsNoDeletes() throws IOException { - runTestNumerics(writerConfig(random.nextBoolean()), false); + runTestNumerics(writerConfig(random().nextBoolean()), false); } public void testIndexNumericsDeletes() throws IOException { - runTestNumerics(writerConfig(random.nextBoolean()), true); + runTestNumerics(writerConfig(random().nextBoolean()), true); } public void testAddIndexes() throws IOException { int valuesPerIndex = 10; List values = Arrays.asList(Type.values()); - Collections.shuffle(values, random); + Collections.shuffle(values, random()); Type first = values.get(0); Type second = values.get(1); // index first index Directory d_1 = newDirectory(); - IndexWriter w_1 = new IndexWriter(d_1, writerConfig(random.nextBoolean())); + IndexWriter w_1 = new IndexWriter(d_1, writerConfig(random().nextBoolean())); indexValues(w_1, valuesPerIndex, first, values, false, 7); w_1.commit(); assertEquals(valuesPerIndex, w_1.maxDoc()); @@ -140,17 +133,17 @@ public class TestDocValuesIndexing extends LuceneTestCase { // index second index Directory d_2 = newDirectory(); - IndexWriter w_2 = new IndexWriter(d_2, writerConfig(random.nextBoolean())); + IndexWriter w_2 = new IndexWriter(d_2, writerConfig(random().nextBoolean())); indexValues(w_2, valuesPerIndex, second, values, false, 7); w_2.commit(); assertEquals(valuesPerIndex, w_2.maxDoc()); _TestUtil.checkIndex(d_2); Directory target = newDirectory(); - IndexWriter w = new IndexWriter(target, writerConfig(random.nextBoolean())); + IndexWriter w = new IndexWriter(target, writerConfig(random().nextBoolean())); DirectoryReader r_1 = DirectoryReader.open(w_1, true); DirectoryReader r_2 = DirectoryReader.open(w_2, true); - if (random.nextBoolean()) { + if (random().nextBoolean()) { w.addIndexes(d_1, d_2); } else { w.addIndexes(r_1, r_2); @@ -238,8 +231,8 @@ public class TestDocValuesIndexing extends LuceneTestCase { private IndexWriterConfig writerConfig(boolean useCompoundFile) { final IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); - cfg.setMergePolicy(newLogMergePolicy(random)); + new MockAnalyzer(random())); + cfg.setMergePolicy(newLogMergePolicy(random())); LogMergePolicy policy = new LogDocMergePolicy(); cfg.setMergePolicy(policy); policy.setUseCompoundFile(useCompoundFile); @@ -255,7 +248,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { final List numVariantList = new ArrayList(NUMERICS); // run in random order to test if fill works correctly during merges - Collections.shuffle(numVariantList, random); + Collections.shuffle(numVariantList, random()); for (Type val : numVariantList) { FixedBitSet deleted = indexValues(w, numValues, val, numVariantList, withDeletions, 7); @@ -331,7 +324,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { IndexWriter w = new IndexWriter(d, cfg); final List byteVariantList = new ArrayList(BYTES); // run in random order to test if fill works correctly during merges - Collections.shuffle(byteVariantList, random); + Collections.shuffle(byteVariantList, random()); final int numValues = 50 + atLeast(10); for (Type byteIndexValue : byteVariantList) { List closeables = new ArrayList(); @@ -414,11 +407,11 @@ public class 
TestDocValuesIndexing extends LuceneTestCase { public void testGetArrayNumerics() throws CorruptIndexException, IOException { Directory d = newDirectory(); - IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); IndexWriter w = new IndexWriter(d, cfg); final int numValues = 50 + atLeast(10); final List numVariantList = new ArrayList(NUMERICS); - Collections.shuffle(numVariantList, random); + Collections.shuffle(numVariantList, random()); for (Type val : numVariantList) { indexValues(w, numValues, val, numVariantList, false, 7); @@ -502,7 +495,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { public void testGetArrayBytes() throws CorruptIndexException, IOException { Directory d = newDirectory(); IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(random())); IndexWriter w = new IndexWriter(d, cfg); final int numValues = 50 + atLeast(10); // only single byte fixed straight supports getArray() @@ -542,7 +535,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { @SuppressWarnings("fallthrough") private Source getSource(DocValues values) throws IOException { // getSource uses cache internally - switch(random.nextInt(5)) { + switch(random().nextInt(5)) { case 3: return values.load(); case 2: @@ -656,17 +649,17 @@ public class TestDocValuesIndexing extends LuceneTestCase { w.addDocument(doc); if (i % 7 == 0) { - if (withDeletions && random.nextBoolean()) { - Type val = valueVarList.get(random.nextInt(1 + valueVarList + if (withDeletions && random().nextBoolean()) { + Type val = valueVarList.get(random().nextInt(1 + valueVarList .indexOf(valueType))); - final int randInt = val == valueType ? random.nextInt(1 + i) : random + final int randInt = val == valueType ? 
random().nextInt(1 + i) : random() .nextInt(numValues); w.deleteDocuments(new Term("id", val.name() + "_" + randInt)); if (val == valueType) { deleted.set(randInt); } } - if (random.nextInt(10) == 0) { + if (random().nextInt(10) == 0) { w.commit(); } } @@ -674,7 +667,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { w.commit(); // TODO test multi seg with deletions - if (withDeletions || random.nextBoolean()) { + if (withDeletions || random().nextBoolean()) { w.forceMerge(1, true); } return deleted; @@ -682,7 +675,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { public void testMultiValuedDocValuesField() throws Exception { Directory d = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, d); + RandomIndexWriter w = new RandomIndexWriter(random(), d); Document doc = new Document(); DocValuesField f = new DocValuesField("field", 17, Type.VAR_INTS); // Index doc values are single-valued so we should not @@ -709,7 +702,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { public void testDifferentTypedDocValuesField() throws Exception { Directory d = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, d); + RandomIndexWriter w = new RandomIndexWriter(random(), d); Document doc = new Document(); // Index doc values are single-valued so we should not // be able to add same field more than once: @@ -740,17 +733,17 @@ public class TestDocValuesIndexing extends LuceneTestCase { boolean fixed = type == Type.BYTES_FIXED_SORTED; final Directory d = newDirectory(); IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(random())); IndexWriter w = new IndexWriter(d, cfg); int numDocs = atLeast(100); BytesRefHash hash = new BytesRefHash(); Map docToString = new HashMap(); - int len = 1 + random.nextInt(50); + int len = 1 + random().nextInt(50); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); doc.add(newField("id", "" + i, TextField.TYPE_STORED)); - String string =fixed ? _TestUtil.randomFixedByteLengthUnicodeString(random, - len) : _TestUtil.randomRealisticUnicodeString(random, 1, len); + String string =fixed ? _TestUtil.randomFixedByteLengthUnicodeString(random(), + len) : _TestUtil.randomRealisticUnicodeString(random(), 1, len); BytesRef br = new BytesRef(string); doc.add(new DocValuesField("field", br, type)); hash.add(br); @@ -777,8 +770,8 @@ public class TestDocValuesIndexing extends LuceneTestCase { Document doc = new Document(); String id = "" + i + numDocs; doc.add(newField("id", id, TextField.TYPE_STORED)); - String string = fixed ? _TestUtil.randomFixedByteLengthUnicodeString(random, - len) : _TestUtil.randomRealisticUnicodeString(random, 1, len); + String string = fixed ? 
_TestUtil.randomFixedByteLengthUnicodeString(random(), + len) : _TestUtil.randomRealisticUnicodeString(random(), 1, len); BytesRef br = new BytesRef(string); hash.add(br); docToString.put(id, string); @@ -826,6 +819,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { } public void testWithThreads() throws Exception { + Random random = random(); final int NUM_DOCS = atLeast(100); final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random, dir); @@ -883,12 +877,13 @@ public class TestDocValuesIndexing extends LuceneTestCase { final DocValues.Source docIDToID = sr.docValues("id").getSource(); - final int NUM_THREADS = _TestUtil.nextInt(random, 1, 10); + final int NUM_THREADS = _TestUtil.nextInt(random(), 1, 10); Thread[] threads = new Thread[NUM_THREADS]; for(int thread=0;thread= maxDoc) { @@ -267,8 +267,8 @@ public class TestDocsAndPositions extends LuceneTestCase { */ public void testLargeNumberOfPositions() throws IOException { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); int howMany = 1000; FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setOmitNorms(true); @@ -304,10 +304,10 @@ public class TestDocsAndPositions extends LuceneTestCase { int initDoc = 0; int maxDoc = atomicReaderContext.reader().maxDoc(); // initially advance or do next doc - if (random.nextBoolean()) { + if (random().nextBoolean()) { initDoc = docsAndPosEnum.nextDoc(); } else { - initDoc = docsAndPosEnum.advance(random.nextInt(maxDoc)); + initDoc = docsAndPosEnum.advance(random().nextInt(maxDoc)); } String msg = "Iteration: " + i + " initDoc: " + initDoc; // TODO: + " payloads: " + usePayload; assertEquals(howMany / 2, docsAndPosEnum.freq()); @@ -324,13 +324,13 @@ public class TestDocsAndPositions extends LuceneTestCase { public void testDocsEnumStart() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newField("foo", "bar", StringField.TYPE_UNSTORED)); writer.addDocument(doc); DirectoryReader reader = writer.getReader(); AtomicReader r = getOnlySegmentReader(reader); - DocsEnum disi = _TestUtil.docs(random, r, "foo", new BytesRef("bar"), null, null, false); + DocsEnum disi = _TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, false); int docid = disi.docID(); assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS); assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); @@ -338,7 +338,7 @@ public class TestDocsAndPositions extends LuceneTestCase { // now reuse and check again TermsEnum te = r.terms("foo").iterator(null); assertTrue(te.seekExact(new BytesRef("bar"), true)); - disi = _TestUtil.docs(random, te, null, disi, false); + disi = _TestUtil.docs(random(), te, null, disi, false); docid = disi.docID(); assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS); assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); @@ -349,7 +349,7 @@ public class TestDocsAndPositions extends LuceneTestCase { public void testDocsAndPositionsEnumStart() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + 
RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newField("foo", "bar", TextField.TYPE_UNSTORED)); writer.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java index 512bb31e167..b47067ffdd7 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java @@ -59,13 +59,13 @@ public class TestDocumentWriter extends LuceneTestCase { public void testAddDocument() throws Exception { Document testDoc = new Document(); DocHelper.setupDoc(testDoc); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.addDocument(testDoc); writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); //After adding the document, we should be able to read it back in - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); assertTrue(reader != null); Document doc = reader.document(0); assertTrue(doc != null); @@ -126,7 +126,7 @@ public class TestDocumentWriter extends LuceneTestCase { writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "repeated", new BytesRef("repeated"), false); @@ -198,7 +198,7 @@ public class TestDocumentWriter extends LuceneTestCase { writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"), false); assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); @@ -216,7 +216,7 @@ public class TestDocumentWriter extends LuceneTestCase { public void testPreAnalyzedField() throws IOException { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); doc.add(new TextField("preanalyzed", new TokenStream() { @@ -242,7 +242,7 @@ public class TestDocumentWriter extends LuceneTestCase { writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); DocsAndPositionsEnum termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term1"), false); assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); 
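Note on the pattern applied throughout these hunks: the shared LuceneTestCase `random` field is replaced at every use site by the per-thread `random()` accessor. A minimal sketch of the resulting usage, assuming only the `random()` method these hunks introduce; the class name MyRandomizedTest and the loop body are illustrative and not part of this patch:

    import java.util.Random;

    import org.apache.lucene.util.LuceneTestCase;

    public class MyRandomizedTest extends LuceneTestCase {
      public void testSketch() throws Exception {
        // On the test thread itself, call random() at each use site,
        // exactly as the substitutions in the surrounding hunks do:
        final int iters = 1 + random().nextInt(8);

        // Before handing randomness to another thread, or to a component
        // that must replay the same sequence later, capture a seed up
        // front and derive a private Random from it:
        final long seed = random().nextLong();
        Thread worker = new Thread() {
          @Override
          public void run() {
            Random r = new Random(seed); // private, reproducible source
            for (int i = 0; i < iters; i++) {
              System.out.println(r.nextInt(100));
            }
          }
        };
        worker.start();
        worker.join();
      }
    }

Hoisting `Random random = random();` into a local, as testWithThreads does earlier in this patch, serves the same purpose: resolve the context Random once and reuse it, instead of re-resolving it inside tight loops or handing the accessor itself to helper threads.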
@@ -280,7 +280,7 @@ public class TestDocumentWriter extends LuceneTestCase { doc.add(newField("f2", "v2", StringField.TYPE_STORED)); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.addDocument(doc); writer.close(); @@ -320,7 +320,7 @@ public class TestDocumentWriter extends LuceneTestCase { doc.add(newField("f2", "v2", customType2)); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.addDocument(doc); writer.forceMerge(1); // be sure to have a single segment writer.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java index ec21cc873db..ba43d4d842e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java @@ -36,10 +36,10 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { public void testUpdateDelteSlices() { DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue(); - final int size = 200 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER; Integer[] ids = new Integer[size]; for (int i = 0; i < ids.length; i++) { - ids[i] = random.nextInt(); + ids[i] = random().nextInt(); } DeleteSlice slice1 = queue.newSlice(); DeleteSlice slice2 = queue.newSlice(); @@ -54,14 +54,14 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { Term[] term = new Term[] {new Term("id", i.toString())}; uniqueValues.add(term[0]); queue.addDelete(term); - if (random.nextInt(20) == 0 || j == ids.length - 1) { + if (random().nextInt(20) == 0 || j == ids.length - 1) { queue.updateSlice(slice1); assertTrue(slice1.isTailItem(term)); slice1.apply(bd1, j); assertAllBetween(last1, j, bd1, ids); last1 = j + 1; } - if (random.nextInt(10) == 5 || j == ids.length - 1) { + if (random().nextInt(10) == 5 || j == ids.length - 1) { queue.updateSlice(slice2); assertTrue(slice2.isTailItem(term)); slice2.apply(bd2, j); @@ -96,12 +96,12 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { assertFalse(queue.anyChanges()); queue.clear(); assertFalse(queue.anyChanges()); - final int size = 200 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER; int termsSinceFreeze = 0; int queriesSinceFreeze = 0; for (int i = 0; i < size; i++) { Term term = new Term("id", "" + i); - if (random.nextInt(10) == 0) { + if (random().nextInt(10) == 0) { queue.addDelete(new TermQuery(term)); queriesSinceFreeze++; } else { @@ -109,7 +109,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { termsSinceFreeze++; } assertTrue(queue.anyChanges()); - if (random.nextInt(10) == 0) { + if (random().nextInt(10) == 0) { queue.clear(); queue.tryApplyGlobalSlice(); assertFalse(queue.anyChanges()); @@ -120,12 +120,12 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { public void testAnyChanges() { DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue(); - final int size = 200 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER; int termsSinceFreeze = 0; int queriesSinceFreeze = 0; for 
(int i = 0; i < size; i++) { Term term = new Term("id", "" + i); - if (random.nextInt(10) == 0) { + if (random().nextInt(10) == 0) { queue.addDelete(new TermQuery(term)); queriesSinceFreeze++; } else { @@ -133,7 +133,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { termsSinceFreeze++; } assertTrue(queue.anyChanges()); - if (random.nextInt(5) == 0) { + if (random().nextInt(5) == 0) { FrozenBufferedDeletes freezeGlobalBuffer = queue .freezeGlobalBuffer(null); assertEquals(termsSinceFreeze, freezeGlobalBuffer.termCount); @@ -174,15 +174,15 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { public void testStressDeleteQueue() throws InterruptedException { DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue(); Set uniqueValues = new HashSet(); - final int size = 10000 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 10000 + random().nextInt(500) * RANDOM_MULTIPLIER; Integer[] ids = new Integer[size]; for (int i = 0; i < ids.length; i++) { - ids[i] = random.nextInt(); + ids[i] = random().nextInt(); uniqueValues.add(new Term("id", ids[i].toString())); } CountDownLatch latch = new CountDownLatch(1); AtomicInteger index = new AtomicInteger(0); - final int numThreads = 2 + random.nextInt(5); + final int numThreads = 2 + random().nextInt(5); UpdateThread[] threads = new UpdateThread[numThreads]; for (int i = 0; i < threads.length; i++) { threads[i] = new UpdateThread(queue, index, ids, latch); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java index ed7d5f4b75f..bbf90e3952a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java @@ -68,11 +68,11 @@ public class TestDuelingCodecs extends LuceneTestCase { // so this would make assertEquals complicated. 
leftCodec = Codec.forName("SimpleText"); - rightCodec = new RandomCodec(random, false); + rightCodec = new RandomCodec(random(), false); leftDir = newDirectory(); rightDir = newDirectory(); - long seed = random.nextLong(); + long seed = random().nextLong(); // must use same seed because of random payloads, etc Analyzer leftAnalyzer = new MockAnalyzer(new Random(seed)); @@ -212,7 +212,7 @@ public class TestDuelingCodecs extends LuceneTestCase { if (deep) { int numIntersections = atLeast(3); for (int i = 0; i < numIntersections; i++) { - String re = AutomatonTestUtil.randomRegexp(random); + String re = AutomatonTestUtil.randomRegexp(random()); CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton()); if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) { // TODO: test start term too @@ -249,7 +249,7 @@ public class TestDuelingCodecs extends LuceneTestCase { */ public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws Exception { BytesRef term; - Bits randomBits = new RandomBits(leftReader.maxDoc(), random.nextDouble(), random); + Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random()); DocsAndPositionsEnum leftPositions = null; DocsAndPositionsEnum rightPositions = null; DocsEnum leftDocs = null; @@ -383,13 +383,13 @@ public class TestDuelingCodecs extends LuceneTestCase { int skipInterval = 16; while (true) { - if (random.nextBoolean()) { + if (random().nextBoolean()) { // nextDoc() docid = leftDocs.nextDoc(); assertEquals(info, docid, rightDocs.nextDoc()); } else { // advance() - int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random.nextGaussian() * averageGap)); + int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap)); docid = leftDocs.advance(skip); assertEquals(info, docid, rightDocs.advance(skip)); } @@ -418,13 +418,13 @@ public class TestDuelingCodecs extends LuceneTestCase { int skipInterval = 16; while (true) { - if (random.nextBoolean()) { + if (random().nextBoolean()) { // nextDoc() docid = leftDocs.nextDoc(); assertEquals(info, docid, rightDocs.nextDoc()); } else { // advance() - int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random.nextGaussian() * averageGap)); + int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap)); docid = leftDocs.advance(skip); assertEquals(info, docid, rightDocs.advance(skip)); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java index ac7393a6ea9..6dbc43bbe55 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java @@ -52,7 +52,7 @@ public class TestFieldInfos extends LuceneTestCase { assertTrue(fieldInfos.size() == DocHelper.all.size()); //this is all b/c we are using the no-arg constructor - IndexOutput output = dir.createOutput(filename, newIOContext(random)); + IndexOutput output = dir.createOutput(filename, newIOContext(random())); assertTrue(output != null); //Use a RAMOutputStream @@ -120,34 +120,34 @@ public class TestFieldInfos extends LuceneTestCase { } try { - readOnly.addOrUpdate("bogus", random.nextBoolean()); + readOnly.addOrUpdate("bogus", random().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate("bogus", random.nextBoolean(), 
random.nextBoolean()); + readOnly.addOrUpdate("bogus", random().nextBoolean(), random().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean(), - random.nextBoolean()); + readOnly.addOrUpdate("bogus", random().nextBoolean(), random().nextBoolean(), + random().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean(), - random.nextBoolean(), - random.nextBoolean(), random.nextBoolean() ? IndexOptions.DOCS_ONLY : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, null, null); + readOnly.addOrUpdate("bogus", random().nextBoolean(), random().nextBoolean(), + random().nextBoolean(), + random().nextBoolean(), random().nextBoolean() ? IndexOptions.DOCS_ONLY : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, null, null); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate(Arrays.asList("a", "b", "c"), random.nextBoolean()); + readOnly.addOrUpdate(Arrays.asList("a", "b", "c"), random().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java index 861da45c525..44727d5cfec 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java @@ -57,7 +57,7 @@ public class TestFieldsReader extends LuceneTestCase { DocHelper.setupDoc(testDoc); _TestUtil.add(testDoc, fieldInfos); dir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false); IndexWriter writer = new IndexWriter(dir, conf); writer.addDocument(testDoc); @@ -195,7 +195,7 @@ public class TestFieldsReader extends LuceneTestCase { try { Directory dir = new FaultyFSDirectory(indexDir); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); for(int i=0;i<2;i++) writer.addDocument(testDoc); writer.forceMerge(1); @@ -232,7 +232,7 @@ public class TestFieldsReader extends LuceneTestCase { public void testNumericField() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); final int numDocs = atLeast(500); final Number[] answers = new Number[numDocs]; final NumericType[] typeAnswers = new NumericType[numDocs]; @@ -242,16 +242,16 @@ public class TestFieldsReader extends LuceneTestCase { final Field sf; final Number answer; final NumericType typeAnswer; - if (random.nextBoolean()) { + if (random().nextBoolean()) { // float/double - if (random.nextBoolean()) { - final float f = random.nextFloat(); + if (random().nextBoolean()) { + final float f = random().nextFloat(); answer = Float.valueOf(f); nf = new FloatField("nf", f); sf = new StoredField("nf", f); typeAnswer = NumericType.FLOAT; } else { - final 
double d = random.nextDouble(); + final double d = random().nextDouble(); answer = Double.valueOf(d); nf = new DoubleField("nf", d); sf = new StoredField("nf", d); @@ -259,14 +259,14 @@ public class TestFieldsReader extends LuceneTestCase { } } else { // int/long - if (random.nextBoolean()) { - final int i = random.nextInt(); + if (random().nextBoolean()) { + final int i = random().nextInt(); answer = Integer.valueOf(i); nf = new IntField("nf", i); sf = new StoredField("nf", i); typeAnswer = NumericType.INT; } else { - final long l = random.nextLong(); + final long l = random().nextLong(); answer = Long.valueOf(l); nf = new LongField("nf", l); sf = new StoredField("nf", l); @@ -302,7 +302,7 @@ public class TestFieldsReader extends LuceneTestCase { public void testIndexedBit() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document doc = new Document(); FieldType onlyStored = new FieldType(); onlyStored.setStored(true); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java index eedb7f09cfc..55a3880d35d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java @@ -129,7 +129,7 @@ public class TestFilterAtomicReader extends LuceneTestCase { public void testFilterIndexReader() throws Exception { Directory directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document d1 = new Document(); d1.add(newField("default","one two", TextField.TYPE_STORED)); @@ -150,7 +150,7 @@ public class TestFilterAtomicReader extends LuceneTestCase { // We mess with the postings so this can fail: ((MockDirectoryWrapper) target).setCrossCheckTermVectorsOnClose(false); - writer = new IndexWriter(target, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(target, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); IndexReader reader = new TestReader(IndexReader.open(directory)); writer.addIndexes(reader); writer.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java index e60d8f38659..0c7be517740 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java @@ -33,7 +33,7 @@ public class TestFlex extends LuceneTestCase { IndexWriter w = new IndexWriter( d, - new IndexWriterConfig(Version.LUCENE_31, new MockAnalyzer(random)). + new IndexWriterConfig(Version.LUCENE_31, new MockAnalyzer(random())). 
setMaxBufferedDocs(7) ); @@ -65,7 +65,7 @@ public class TestFlex extends LuceneTestCase { public void testTermOrd() throws Exception { Directory d = newDirectory(); IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()))); + new MockAnalyzer(random())).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()))); Document doc = new Document(); doc.add(newField("f", "a b c", TextField.TYPE_UNSTORED)); w.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java index 4715c581347..d3fc7844808 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java @@ -39,7 +39,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { - lineDocFile = new LineFileDocs(random, defaultCodecSupportsDocValues()); + lineDocFile = new LineFileDocs(random(), defaultCodecSupportsDocValues()); } @AfterClass @@ -51,14 +51,14 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { public void testFlushByRam() throws CorruptIndexException, LockObtainFailedException, IOException, InterruptedException { final double ramBuffer = (TEST_NIGHTLY ? 1 : 10) + atLeast(2) - + random.nextDouble(); - runFlushByRam(1 + random.nextInt(TEST_NIGHTLY ? 5 : 1), ramBuffer, false); + + random().nextDouble(); + runFlushByRam(1 + random().nextInt(TEST_NIGHTLY ? 5 : 1), ramBuffer, false); } public void testFlushByRamLargeBuffer() throws CorruptIndexException, LockObtainFailedException, IOException, InterruptedException { // with a 256 mb ram buffer we should never stall - runFlushByRam(1 + random.nextInt(TEST_NIGHTLY ? 5 : 1), 256.d, true); + runFlushByRam(1 + random().nextInt(TEST_NIGHTLY ? 
5 : 1), 256.d, true); } protected void runFlushByRam(int numThreads, double maxRamMB, @@ -69,7 +69,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { Directory dir = newDirectory(); MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setFlushPolicy(flushPolicy); + new MockAnalyzer(random())).setFlushPolicy(flushPolicy); final int numDWPT = 1 + atLeast(2); DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool( numDWPT); @@ -125,7 +125,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { Directory dir = newDirectory(); MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setFlushPolicy(flushPolicy); + new MockAnalyzer(random())).setFlushPolicy(flushPolicy); final int numDWPT = 1 + atLeast(2); DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool( @@ -168,16 +168,16 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { } public void testRandom() throws IOException, InterruptedException { - final int numThreads = 1 + random.nextInt(8); + final int numThreads = 1 + random().nextInt(8); final int numDocumentsToIndex = 50 + atLeast(70); AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex); Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(random())); MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy(); iwc.setFlushPolicy(flushPolicy); - final int numDWPT = 1 + random.nextInt(8); + final int numDWPT = 1 + random().nextInt(8); DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool( numDWPT); iwc.setIndexerThreadPool(threadPool); @@ -230,15 +230,15 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { public void testStallControl() throws InterruptedException, CorruptIndexException, LockObtainFailedException, IOException { - int[] numThreads = new int[] { 4 + random.nextInt(8), 1 }; - final int numDocumentsToIndex = 50 + random.nextInt(50); + int[] numThreads = new int[] { 4 + random().nextInt(8), 1 }; + final int numDocumentsToIndex = 50 + random().nextInt(50); for (int i = 0; i < numThreads.length; i++) { AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex); MockDirectoryWrapper dir = newDirectory(); // mock a very slow harddisk sometimes here so that flushing is very slow dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(random())); iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); iwc.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH); FlushPolicy flushPolicy = new FlushByRamOrCountsPolicy(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java b/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java index 54b2c9457c4..182e5264dbc 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java @@ -39,13 +39,13 @@ public class TestForTooMuchCloning extends LuceneTestCase { final MockDirectoryWrapper dir = newDirectory(); final TieredMergePolicy tmp = new TieredMergePolicy(); 
tmp.setMaxMergeAtOnce(2); - final RandomIndexWriter w = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(tmp)); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2).setMergePolicy(tmp)); final int numDocs = 20; for(int docs=0;docs 0) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java index 2d809f2f074..6f023643957 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java @@ -29,6 +29,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; +import java.util.Random; public class TestIndexInput extends LuceneTestCase { @@ -85,6 +86,7 @@ public class TestIndexInput extends LuceneTestCase { @BeforeClass public static void beforeClass() throws IOException { + Random random = random(); INTS = new int[COUNT]; LONGS = new long[COUNT]; RANDOM_TEST_BYTES = new byte[COUNT * (5 + 4 + 9 + 8)]; @@ -177,6 +179,7 @@ public class TestIndexInput extends LuceneTestCase { // this test checks the raw IndexInput methods as it uses RAMIndexInput which extends IndexInput directly public void testRawIndexInputRead() throws IOException { + Random random = random(); final RAMDirectory dir = new RAMDirectory(); IndexOutput os = dir.createOutput("foo", newIOContext(random)); os.writeBytes(READ_TEST_BYTES, READ_TEST_BYTES.length); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java index 55ad2cf8692..c84f27bf224 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -79,7 +79,7 @@ public class TestIndexWriter extends LuceneTestCase { try { IndexWriterConfig.setDefaultWriteLockTimeout(2000); assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout()); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); } finally { IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout); } @@ -92,7 +92,7 @@ public class TestIndexWriter extends LuceneTestCase { writer.close(); // delete 40 documents - writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)); + writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)); for (i = 0; i < 40; i++) { writer.deleteDocuments(new Term("id", ""+i)); } @@ -103,7 +103,7 @@ public class TestIndexWriter extends LuceneTestCase { reader.close(); // merge the index down and check that the new doc count is correct - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); assertEquals(60, writer.numDocs()); writer.forceMerge(1); assertEquals(60, writer.maxDoc()); @@ -118,7 +118,7 @@ public class TestIndexWriter extends LuceneTestCase { // make sure opening a new index for create over // this 
existing one works correctly: - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); assertEquals(0, writer.maxDoc()); assertEquals(0, writer.numDocs()); writer.close(); @@ -144,7 +144,7 @@ public class TestIndexWriter extends LuceneTestCase { public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException { String[] startFiles = dir.listAll(); - new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))).rollback(); + new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))).rollback(); String[] endFiles = dir.listAll(); Arrays.sort(startFiles); @@ -173,7 +173,7 @@ public class TestIndexWriter extends LuceneTestCase { Directory dir = newDirectory(); // add one document & close writer - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); addDoc(writer); writer.close(); @@ -182,7 +182,7 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals("should be one document", reader.numDocs(), 1); // now open index for create: - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); assertEquals("should be zero documents", writer.maxDoc(), 0); addDoc(writer); writer.close(); @@ -201,7 +201,7 @@ public class TestIndexWriter extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); addDoc(writer); // close @@ -219,7 +219,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testIndexNoDocuments() throws IOException { MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.commit(); writer.close(); @@ -228,7 +228,7 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(0, reader.numDocs()); reader.close(); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); writer.commit(); writer.close(); @@ -241,7 +241,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testManyFields() throws IOException { MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(10)); for(int j=0;j<100;j++) { Document doc = new Document(); doc.add(newField("a"+j, "aaa" + j, 
storedTextType)); @@ -273,7 +273,7 @@ public class TestIndexWriter extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())). setRAMBufferSizeMB(0.000001). setMergePolicy(newLogMergePolicy(10)) ); @@ -296,7 +296,7 @@ public class TestIndexWriter extends LuceneTestCase { // maxBufferedDocs in a write session public void testChangingRAMBuffer() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.getConfig().setMaxBufferedDocs(10); writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); @@ -350,7 +350,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testChangingRAMBuffer2() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.getConfig().setMaxBufferedDocs(10); writer.getConfig().setMaxBufferedDeleteTerms(10); writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); @@ -411,7 +411,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testDiverseDocs() throws IOException { MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setRAMBufferSizeMB(0.5)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setRAMBufferSizeMB(0.5)); int n = atLeast(1); for(int i=0;i docs = new ArrayList(); docs.add(new Document()); @@ -1806,7 +1806,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testPrepareCommitThenClose() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.prepareCommit(); try { @@ -1827,7 +1827,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testPrepareCommitThenRollback() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.prepareCommit(); w.rollback(); @@ -1839,7 +1839,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testPrepareCommitThenRollback2() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.commit(); w.addDocument(new Document()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java index 9b94bc9e5f2..dedfa9d54ef 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java +++ 
b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java @@ -44,7 +44,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { */ public void testCommitOnClose() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for (int i = 0; i < 14; i++) { TestIndexWriter.addDoc(writer); } @@ -59,7 +59,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { reader = IndexReader.open(dir); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for(int i=0;i<3;i++) { for(int j=0;j<11;j++) { TestIndexWriter.addDoc(writer); @@ -95,7 +95,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { */ public void testCommitOnCloseAbort() throws IOException { MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(10)); for (int i = 0; i < 14; i++) { TestIndexWriter.addDoc(writer); } @@ -108,7 +108,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { assertEquals("first number of hits", 14, hits.length); reader.close(); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10)); for(int j=0;j<17;j++) { TestIndexWriter.addDoc(writer); @@ -135,7 +135,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { // Now make sure we can re-open the index, add docs, // and all is good: - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10)); // On abort, writer in fact may write to the same @@ -182,7 +182,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { assumeFalse("This test cannot run with Memory codec", idFormat.equals("Memory") || contentFormat.equals("Memory")); MockDirectoryWrapper dir = newDirectory(); Analyzer analyzer; - if (random.nextBoolean()) { + if (random().nextBoolean()) { // no payloads analyzer = new Analyzer() { @Override @@ -192,12 +192,12 @@ public class TestIndexWriterCommit extends LuceneTestCase { }; } else { // fixed length payloads - final int length = random.nextInt(200); + final int length = random().nextInt(200); analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); - return new TokenStreamComponents(tokenizer, new MockFixedLengthPayloadFilter(random, tokenizer, length)); + return new TokenStreamComponents(tokenizer, new MockFixedLengthPayloadFilter(random(), tokenizer, length)); } }; } @@ -266,7 +266,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { dir.setPreventDoubleWrite(false); IndexWriter writer = new IndexWriter( 
dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(10). setMergePolicy(newLogMergePolicy(10)) ); @@ -275,7 +275,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { } writer.close(); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); // Open a reader before closing (committing) the writer: @@ -300,7 +300,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: do real full merge"); } - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); writer.close(); @@ -325,8 +325,8 @@ public class TestIndexWriterCommit extends LuceneTestCase { final int NUM_THREADS = 5; final double RUN_SEC = 0.5; final Directory dir = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); _TestUtil.reduceOpenFiles(w.w); w.commit(); final AtomicBoolean failed = new AtomicBoolean(); @@ -381,7 +381,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(2). setMergePolicy(newLogMergePolicy(5)) ); @@ -418,7 +418,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { public void testFutureCommit() throws Exception { Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); Document doc = new Document(); w.addDocument(doc); @@ -444,7 +444,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { assertNotNull(commit); - w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit)); + w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit)); assertEquals(1, w.numDocs()); @@ -473,7 +473,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { // changed since LUCENE-2386, where before IW would always commit on a fresh // new index.
Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); try { DirectoryReader.listCommits(dir); fail("listCommits should have thrown an exception over empty index"); @@ -492,7 +492,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(2). setMergePolicy(newLogMergePolicy(5)) ); @@ -549,7 +549,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(2). setMergePolicy(newLogMergePolicy(5)) ); @@ -575,7 +575,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { reader.close(); reader2.close(); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for (int i = 0; i < 17; i++) TestIndexWriter.addDoc(writer); @@ -601,7 +601,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { public void testPrepareCommitNoChanges() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.prepareCommit(); writer.commit(); writer.close(); @@ -615,7 +615,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { // LUCENE-1382 public void testCommitUserData() throws IOException { Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2)); for(int j=0;j<17;j++) TestIndexWriter.addDoc(w); w.close(); @@ -625,7 +625,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { assertEquals(0, r.getIndexCommit().getUserData().size()); r.close(); - w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); + w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2)); for(int j=0;j<17;j++) TestIndexWriter.addDoc(w); Map<String,String> data = new HashMap<String,String>(); @@ -637,7 +637,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { assertEquals("test1", r.getIndexCommit().getUserData().get("label")); r.close(); - w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); w.forceMerge(1); w.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java index 334a389c7a0..20e9dae187b 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java +++
b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java @@ -51,7 +51,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testDefaults() throws Exception { - IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); assertEquals(MockAnalyzer.class, conf.getAnalyzer().getClass()); assertNull(conf.getIndexCommit()); assertEquals(KeepOnlyLastCommitDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass()); @@ -138,7 +138,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testToString() throws Exception { - String str = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).toString(); + String str = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).toString(); for (Field f : IndexWriterConfig.class.getDeclaredFields()) { int modifiers = f.getModifiers(); if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) { @@ -155,7 +155,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testClone() throws Exception { - IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); IndexWriterConfig clone = conf.clone(); // Clone is shallow since not all parameters are cloneable. @@ -167,7 +167,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testInvalidValues() throws Exception { - IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); // Test IndexDeletionPolicy assertEquals(KeepOnlyLastCommitDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 526094ee401..d7db99a8ded 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -52,7 +52,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1)); + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1)); FieldType custom1 = new FieldType(); custom1.setStored(true); @@ -91,7 +91,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; @@ -124,7 +124,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testMaxBufferedDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, 
false)).setMaxBufferedDeleteTerms(1)); + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1)); writer.addDocument(new Document()); writer.deleteDocuments(new Term("foobar", "1")); @@ -143,7 +143,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { } Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(4) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(4) .setMaxBufferedDeleteTerms(4)); int id = 0; int value = 100; @@ -181,7 +181,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testBothDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(100) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(100) .setMaxBufferedDeleteTerms(100)); int id = 0; @@ -215,7 +215,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testBatchDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -258,7 +258,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAll() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -304,7 +304,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllRollback() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -341,7 +341,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllNRT() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -429,7 +429,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockDirectoryWrapper startDir = newDirectory(); // TODO: find the resource leak that only occurs sometimes here. 
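// Condensed sketch of the configuration idiom repeated throughout the
// surrounding TestIndexWriterDelete hunks (variable names illustrative;
// assumes a Directory obtained from newDirectory()): tiny buffer limits force
// frequent flushes of both added documents and buffered delete terms.
IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
    TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
    .setMaxBufferedDocs(2)           // flush after every two buffered documents
    .setMaxBufferedDeleteTerms(2));  // and after every two buffered delete terms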
startDir.setNoDeleteOpenFile(false); - IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); for (int i = 0; i < 157; i++) { Document d = new Document(); d.add(newField("id", Integer.toString(i), StringField.TYPE_STORED)); @@ -450,11 +450,11 @@ public class TestIndexWriterDelete extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: cycle"); } - MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir, newIOContext(random))); + MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory(startDir, newIOContext(random()))); dir.setPreventDoubleWrite(false); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)) + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(1000) .setMaxBufferedDeleteTerms(1000) .setMergeScheduler(new ConcurrentMergeScheduler())); @@ -691,7 +691,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy())); + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) modifier.getConfig().getMergePolicy(); lmp.setUseCompoundFile(true); @@ -815,7 +815,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { String[] text = { "Amsterdam", "Venice" }; MockDirectoryWrapper dir = newDirectory(); - IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); modifier.commit(); dir.failOn(failure.reset()); @@ -845,7 +845,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteNullQuery() throws IOException { Directory dir = newDirectory(); - IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); for (int i = 0; i < 5; i++) { addDoc(modifier, i, 2*i); @@ -860,23 +860,23 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllSlowly() throws Exception { final Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); final int NUM_DOCS = atLeast(1000); final List<Integer> ids = new ArrayList<Integer>(NUM_DOCS); for(int id=0;id<NUM_DOCS;id++) { ids.add(id); } [...] diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java [...] ThreadLocal<Thread> doFail = new ThreadLocal<Thread>(); private class MockIndexWriter extends IndexWriter { - Random r = new Random(random.nextLong()); + Random r = new Random(random().nextLong()); public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf); @@ -223,7 +223,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { } MockDirectoryWrapper dir = newDirectory(); - MockAnalyzer analyzer = new MockAnalyzer(random); + MockAnalyzer analyzer = new MockAnalyzer(random()); analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases. MockIndexWriter writer = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer) .setRAMBufferSizeMB(0.1).setMergeScheduler(new ConcurrentMergeScheduler())); @@ -266,7 +266,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testRandomExceptionsThreads() throws Throwable { MockDirectoryWrapper dir = newDirectory(); - MockAnalyzer analyzer = new MockAnalyzer(random); + MockAnalyzer analyzer = new MockAnalyzer(random()); analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases. MockIndexWriter writer = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer) .setRAMBufferSizeMB(0.2).setMergeScheduler(new ConcurrentMergeScheduler())); @@ -354,7 +354,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testExceptionDocumentsWriterInit() throws IOException { Directory dir = newDirectory(); - MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); @@ -372,7 +372,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // LUCENE-1208 public void testExceptionJustBeforeFlush() throws IOException { Directory dir = newDirectory(); - MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); + MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2)); Document doc = new Document(); doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); @@ -422,7 +422,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // LUCENE-1210 public void testExceptionOnMergeInit() throws IOException { Directory dir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2); MockIndexWriter3 w = new MockIndexWriter3(dir, conf); @@ -501,7 +501,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // Make sure the doc that hit the exception was marked // as deleted: - DocsEnum tdocs = _TestUtil.docs(random, reader, + DocsEnum tdocs = _TestUtil.docs(random(), reader, t.field(), new BytesRef(t.text()), MultiFields.getLiveDocs(reader), @@ -561,7 +561,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { failure.setDoFail(); dir.failOn(failure); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new 
MockAnalyzer(random())).setMaxBufferedDocs(2)); Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; doc.add(newField("content", contents, TextField.TYPE_UNSTORED)); @@ -698,7 +698,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(-1) .setMergePolicy( - random.nextBoolean() ? NoMergePolicy.COMPOUND_FILES + random().nextBoolean() ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES)); // don't use a merge policy here they depend on the DWPThreadPool and its max thread states etc. final int finalI = i; @@ -824,7 +824,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(2). setMergeScheduler(new ConcurrentMergeScheduler()). setMergePolicy(newLogMergePolicy(5)) @@ -907,7 +907,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); dir.setFailOnCreateOutput(false); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); @@ -929,7 +929,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testForceMergeExceptions() throws IOException { Directory startDir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()); + IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100); IndexWriter w = new IndexWriter(startDir, conf); for(int i=0;i<27;i++) @@ -941,8 +941,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: iter " + i); } - MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir, newIOContext(random))); - conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new ConcurrentMergeScheduler()); + MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory(startDir, newIOContext(random()))); + conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergeScheduler(new ConcurrentMergeScheduler()); ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions(); w = new IndexWriter(dir, conf); dir.setRandomIOExceptionRate(0.5); @@ -965,7 +965,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { final AtomicBoolean thrown = new AtomicBoolean(false); final Directory dir = newDirectory(); final IndexWriter writer = new IndexWriter(dir, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new InfoStream() { + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setInfoStream(new InfoStream() { @Override public void message(String component, final String message) { if (message.startsWith("now flush at close") && thrown.compareAndSet(false, true)) { @@ -1013,7 +1013,7 @@ public class 
TestIndexWriterExceptions extends LuceneTestCase { // LUCENE-1347 public void testRollbackExceptionHang() throws Throwable { Directory dir = newDirectory(); - MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); addDoc(w); w.doFail = true; @@ -1035,7 +1035,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); // add 100 documents for (int i = 0; i < 100; i++) { @@ -1049,8 +1049,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { assertTrue("segment generation should be > 0 but got " + gen, gen > 0); final String segmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(dir); - IndexInput in = dir.openInput(segmentsFileName, newIOContext(random)); - IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen), newIOContext(random)); + IndexInput in = dir.openInput(segmentsFileName, newIOContext(random())); + IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen), newIOContext(random())); out.copyBytes(in, in.length()-1); byte b = in.readByte(); out.writeByte((byte) (1+b)); @@ -1077,7 +1077,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); // add 100 documents for (int i = 0; i < 100; i++) { @@ -1094,8 +1094,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen); - IndexInput in = dir.openInput(fileNameIn, newIOContext(random)); - IndexOutput out = dir.createOutput(fileNameOut, newIOContext(random)); + IndexInput in = dir.openInput(fileNameIn, newIOContext(random())); + IndexOutput out = dir.createOutput(fileNameOut, newIOContext(random())); long length = in.length(); for(int i=0;i docs = new ArrayList(); - final int numDocs2 = random.nextInt(25); + final int numDocs2 = random().nextInt(25); for(int docCount=0;docCount=1 && freq <= 4); for(int pos=0;pos=1 && freq <= 4); for(int pos=0;pos=1 && freq <= 4); } @@ -412,9 +412,9 @@ public class TestLongPostings extends LuceneTestCase { // advance final int targetDocID; if (docID == -1) { - targetDocID = random.nextInt(NUM_DOCS+1); + targetDocID = random().nextInt(NUM_DOCS+1); } else { - targetDocID = docID + _TestUtil.nextInt(random, 1, NUM_DOCS - docID); + targetDocID = docID + _TestUtil.nextInt(random(), 1, NUM_DOCS - docID); } if (VERBOSE) { System.out.println("TEST: docID=" + docID + "; do advance(" + targetDocID + ")"); @@ -440,7 +440,7 @@ public class TestLongPostings extends LuceneTestCase { break; } - if (random.nextInt(6) == 3 && postings != null) { + if (random().nextInt(6) == 3 && postings != null) { final int freq = postings.freq(); assertTrue("got invalid freq=" + freq, freq >=1 && freq <= 4); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java 
b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java index 66637c51033..8f160ebab0f 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java @@ -46,9 +46,9 @@ public class TestMaxTermFrequency extends LuceneTestCase { super.setUp(); dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); config.setSimilarity(new TestSimilarity()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config); Document doc = new Document(); Field foo = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(foo); @@ -81,16 +81,16 @@ public class TestMaxTermFrequency extends LuceneTestCase { */ private String addValue() { List<String> terms = new ArrayList<String>(); - int maxCeiling = _TestUtil.nextInt(random, 0, 255); + int maxCeiling = _TestUtil.nextInt(random(), 0, 255); int max = 0; for (char ch = 'a'; ch <= 'z'; ch++) { - int num = _TestUtil.nextInt(random, 0, maxCeiling); + int num = _TestUtil.nextInt(random(), 0, maxCeiling); for (int i = 0; i < num; i++) terms.add(Character.toString(ch)); max = Math.max(max, num); } expected.add(max); - Collections.shuffle(terms, random); + Collections.shuffle(terms, random()); return Arrays.toString(terms.toArray(new String[terms.size()])); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java index 533e56f734c..a20f0996672 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java @@ -49,8 +49,8 @@ public class TestMixedCodecs extends LuceneTestCase { System.out.println("TEST: " + docUpto + " of " + NUM_DOCS); } if (docsLeftInThisSegment == 0) { - final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); - if (random.nextBoolean()) { + final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); + if (random().nextBoolean()) { // Make sure we aggressively mix in SimpleText // since it has different impls for all codec // formats...
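// Condensed from the surrounding TestMixedCodecs hunks: once a segment's doc
// quota is used up, the writer is closed and reopened with a freshly drawn
// config, so consecutive segments can end up written by different codecs.
if (docsLeftInThisSegment == 0) {
  IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  if (w != null) {
    w.close(); // seal the previous segment's writer
  }
  w = new RandomIndexWriter(random(), dir, iwc); // new writer, new randomized configuration
  docsLeftInThisSegment = _TestUtil.nextInt(random(), 10, 100);
}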
@@ -59,8 +59,8 @@ public class TestMixedCodecs extends LuceneTestCase { if (w != null) { w.close(); } - w = new RandomIndexWriter(random, dir, iwc); - docsLeftInThisSegment = _TestUtil.nextInt(random, 10, 100); + w = new RandomIndexWriter(random(), dir, iwc); + docsLeftInThisSegment = _TestUtil.nextInt(random(), 10, 100); } final Document doc = new Document(); doc.add(newField("id", String.valueOf(docUpto), StringField.TYPE_STORED)); @@ -76,11 +76,11 @@ public class TestMixedCodecs extends LuceneTestCase { // Random delete half the docs: final Set<Integer> deleted = new HashSet<Integer>(); while(deleted.size() < NUM_DOCS/2) { - final Integer toDelete = random.nextInt(NUM_DOCS); + final Integer toDelete = random().nextInt(NUM_DOCS); if (!deleted.contains(toDelete)) { deleted.add(toDelete); w.deleteDocuments(new Term("id", String.valueOf(toDelete))); - if (random.nextInt(17) == 6) { + if (random().nextInt(17) == 6) { final IndexReader r = w.getReader(); assertEquals(NUM_DOCS - deleted.size(), r.numDocs()); r.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java index e7fe8bac0c3..facbc058590 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java @@ -36,34 +36,34 @@ public class TestMultiFields extends LuceneTestCase { Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); _TestUtil.keepFullyDeletedSegments(w); Map<BytesRef,List<Integer>> docs = new HashMap<BytesRef,List<Integer>>(); Set<Integer> deleted = new HashSet<Integer>(); List<BytesRef> terms = new ArrayList<BytesRef>(); - int numDocs = _TestUtil.nextInt(random, 1, 100 * RANDOM_MULTIPLIER); + int numDocs = _TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER); Document doc = new Document(); Field f = newField("field", "", StringField.TYPE_UNSTORED); doc.add(f); Field id = newField("id", "", StringField.TYPE_UNSTORED); doc.add(id); - boolean onlyUniqueTerms = random.nextBoolean(); if (VERBOSE) { System.out.println("TEST: onlyUniqueTerms=" + onlyUniqueTerms + " numDocs=" + numDocs); } Set<BytesRef> uniqueTerms = new HashSet<BytesRef>(); for(int i=0;i<numDocs;i++) { - if (!onlyUniqueTerms && random.nextBoolean() && terms.size() > 0) { + if (!onlyUniqueTerms && random().nextBoolean() && terms.size() > 0) { // re-use existing term - BytesRef term = terms.get(random.nextInt(terms.size())); docs.get(term).add(i); f.setStringValue(term.utf8ToString()); } else { - String s = _TestUtil.randomUnicodeString(random, 10); BytesRef term = new BytesRef(s); if (!docs.containsKey(term)) { docs.put(term, new ArrayList<Integer>()); @@ -75,11 +75,11 @@ public class TestMultiFields extends LuceneTestCase { } id.setStringValue(""+i); w.addDocument(doc); - if (random.nextInt(4) == 1) { + if (random().nextInt(4) == 1) { w.commit(); } - if (i > 0 && random.nextInt(20) == 1) { - int delID = random.nextInt(i); + if (i > 0 && random().nextInt(20) == 1) { + int delID = random().nextInt(i); deleted.add(delID); w.deleteDocuments(new Term("id", ""+delID)); if (VERBOSE) { @@ -116,12 +116,12 @@ public class TestMultiFields extends LuceneTestCase { } for(int i=0;i<100;i++) { - BytesRef term =
terms.get(random.nextInt(terms.size())); + BytesRef term = terms.get(random().nextInt(terms.size())); if (VERBOSE) { System.out.println("TEST: seek term="+ UnicodeUtil.toHexString(term.utf8ToString()) + " " + term); } - DocsEnum docsEnum = _TestUtil.docs(random, reader, "field", term, liveDocs, null, false); + DocsEnum docsEnum = _TestUtil.docs(random(), reader, "field", term, liveDocs, null, false); assertNotNull(docsEnum); for(int docID : docs.get(term)) { @@ -154,7 +154,7 @@ public class TestMultiFields extends LuceneTestCase { public void testSeparateEnums() throws Exception { Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document d = new Document(); d.add(newField("f", "j", StringField.TYPE_UNSTORED)); w.addDocument(d); @@ -162,8 +162,8 @@ public class TestMultiFields extends LuceneTestCase { w.addDocument(d); IndexReader r = w.getReader(); w.close(); - DocsEnum d1 = _TestUtil.docs(random, r, "f", new BytesRef("j"), null, null, false); - DocsEnum d2 = _TestUtil.docs(random, r, "f", new BytesRef("j"), null, null, false); + DocsEnum d1 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, false); + DocsEnum d2 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, false); assertEquals(0, d1.nextDoc()); assertEquals(0, d2.nextDoc()); r.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 9d94e37afa7..820e86a2998 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -48,7 +48,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { class CountingRAMDirectory extends MockDirectoryWrapper { public CountingRAMDirectory(Directory delegate) { - super(random, delegate); + super(random(), delegate); } @Override diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java index 9a991ad3abb..0b698670eee 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java @@ -32,7 +32,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase { Directory mainDir = newDirectory(); IndexWriter writer = new IndexWriter( mainDir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(10). 
setMergePolicy(newLogMergePolicy(false,2)) ); @@ -76,7 +76,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase { int delCount = 0; int addCount = 0; int type; - final Random r = new Random(random.nextLong()); + final Random r = new Random(random().nextLong()); public RunThread(int type, IndexWriter writer) { this.type = type; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java index 7d7e2fc48c2..c13c93bf4b3 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java @@ -39,7 +39,7 @@ public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase { DirectoryReader r = IndexReader.open(writer, true); while (System.currentTimeMillis() < stopTime && !failed.get()) { - if (random.nextBoolean()) { + if (random().nextBoolean()) { if (VERBOSE) { System.out.println("TEST: now reopen r=" + r); } @@ -106,7 +106,7 @@ public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase { @Override protected IndexSearcher getFinalSearcher() throws Exception { final IndexReader r2; - if (random.nextBoolean()) { + if (random().nextBoolean()) { r2 = writer.getReader(); } else { writer.commit(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java index d76831aa03c..f31f63a727e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java @@ -42,15 +42,15 @@ public class TestNeverDelete extends LuceneTestCase { // them. This is still worth running on Windows since // some files the IR opens and closes. 
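// Shared pattern in the threaded tests here (TestNRTReaderWithThreads above,
// TestNeverDelete below): worker threads never call random() directly. Each
// one derives a private Random from a single random().nextLong() seed drawn
// up front, presumably so the per-test Random is only ever touched on the
// test's own thread. A sketch, with illustrative names:
final Random perThread = new Random(random().nextLong()); // seeded on the test thread
Thread worker = new Thread() {
  @Override
  public void run() {
    int value = perThread.nextInt(100); // workers draw only from their own Random
  }
};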
d.setNoDeleteOpenFile(false); - final RandomIndexWriter w = new RandomIndexWriter(random, + final RandomIndexWriter w = new RandomIndexWriter(random(), d, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)) + new MockAnalyzer(random())) .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); - w.w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 5, 30)); + w.w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random(), 5, 30)); w.commit(); - Thread[] indexThreads = new Thread[random.nextInt(4)]; + Thread[] indexThreads = new Thread[random().nextInt(4)]; final long stopTime = System.currentTimeMillis() + atLeast(1000); for (int x=0; x < indexThreads.length; x++) { indexThreads[x] = new Thread() { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java b/lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java index 56ec3f38af7..1ea965dbaa0 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java @@ -24,7 +24,7 @@ import org.apache.lucene.util.LuceneTestCase; public class TestNewestSegment extends LuceneTestCase { public void testNewestSegment() throws Exception { Directory directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); assertNull(writer.newestSegment()); writer.close(); directory.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java index 2517eac327c..2c0de0603c7 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java @@ -69,7 +69,7 @@ public class TestNoDeletionPolicy extends LuceneTestCase { public void testAllCommitsRemain() throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)) + TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); for (int i = 0; i < 10; i++) { Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java index eff3061085a..bc870fb6480 100755 --- a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java @@ -18,6 +18,7 @@ package org.apache.lucene.index; */ import java.io.IOException; +import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -59,9 +60,9 @@ public class TestNorms extends LuceneTestCase { // LUCENE-1260 public void testCustomEncoder() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); config.setSimilarity(new CustomNormEncodingSimilarity()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config); Document doc = new Document(); Field foo = newField("foo", "", TextField.TYPE_UNSTORED); Field 
bar = newField("bar", "", TextField.TYPE_UNSTORED); @@ -115,11 +116,11 @@ public class TestNorms extends LuceneTestCase { */ public void testNormsNotPresent() throws IOException { Directory dir = newDirectory(); - boolean firstWriteNorm = random.nextBoolean(); + boolean firstWriteNorm = random().nextBoolean(); buildIndex(dir, firstWriteNorm); Directory otherDir = newDirectory(); - boolean secondWriteNorm = random.nextBoolean(); + boolean secondWriteNorm = random().nextBoolean(); buildIndex(otherDir, secondWriteNorm); AtomicReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(otherDir)); @@ -134,8 +135,8 @@ public class TestNorms extends LuceneTestCase { } IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + new MockAnalyzer(random())); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config); writer.addIndexes(reader); AtomicReader mergedReader = SlowCompositeReaderWrapper.wrap(writer.getReader()); if (!firstWriteNorm && !secondWriteNorm) { @@ -173,8 +174,9 @@ public class TestNorms extends LuceneTestCase { public void buildIndex(Directory dir, boolean writeNorms) throws IOException, CorruptIndexException { + Random random = random(); IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(random())); Similarity provider = new MySimProvider(writeNorms); config.setSimilarity(provider); RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); @@ -182,7 +184,7 @@ public class TestNorms extends LuceneTestCase { int num = atLeast(100); for (int i = 0; i < num; i++) { Document doc = docs.nextDoc(); - int boost = writeNorms ? 1 + random.nextInt(255) : 0; + int boost = writeNorms ? 1 + random().nextInt(255) : 0; Field f = new Field(byteTestField, "" + boost, TextField.TYPE_STORED); f.setBoost(boost); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java index cedae879740..f44c88c62cb 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java @@ -34,7 +34,7 @@ public class TestOmitNorms extends LuceneTestCase { // omitNorms bit in the FieldInfo public void testOmitNorms() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document d = new Document(); @@ -79,7 +79,7 @@ public class TestOmitNorms extends LuceneTestCase { // omitNorms for the same field works public void testMixedMerge() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). @@ -134,7 +134,7 @@ public class TestOmitNorms extends LuceneTestCase { // field, public void testMixedRAM() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). 
@@ -188,7 +188,7 @@ public class TestOmitNorms extends LuceneTestCase { // Verifies no *.nrm exists when all fields omit norms: public void testNoNrmFile() throws Throwable { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); @@ -265,10 +265,10 @@ public class TestOmitNorms extends LuceneTestCase { * Indexes at least 1 document with f1, and at least 1 document with f2. * returns the norms for "field". */ - static byte[] getNorms(String field, Field f1, Field f2) throws IOException { + byte[] getNorms(String field, Field f1, Field f2) throws IOException { Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()); - RandomIndexWriter riw = new RandomIndexWriter(random, dir, iwc); + IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()); + RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc); // add f1 Document d = new Document(); @@ -281,10 +281,10 @@ public class TestOmitNorms extends LuceneTestCase { riw.addDocument(d); // add a mix of f1's and f2's - int numExtraDocs = _TestUtil.nextInt(random, 1, 1000); + int numExtraDocs = _TestUtil.nextInt(random(), 1, 1000); for (int i = 0; i < numExtraDocs; i++) { d = new Document(); - d.add(random.nextBoolean() ? f1 : f2); + d.add(random().nextBoolean() ? f1 : f2); riw.addDocument(d); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java index 9f2a45c94f7..c3b1cb2298d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java @@ -38,7 +38,7 @@ public class TestOmitPositions extends LuceneTestCase { public void testBasic() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_UNSTORED); ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); @@ -53,7 +53,7 @@ public class TestOmitPositions extends LuceneTestCase { assertNull(MultiFields.getTermPositionsEnum(reader, null, "foo", new BytesRef("test"), false)); - DocsEnum de = _TestUtil.docs(random, reader, "foo", new BytesRef("test"), null, null, true); + DocsEnum de = _TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, true); while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { assertEquals(2, de.freq()); } @@ -66,7 +66,7 @@ public class TestOmitPositions extends LuceneTestCase { // omitTermFreqAndPositions bit in the FieldInfo public void testPositions() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); Document d = new Document(); @@ -189,7 +189,7 @@ public class TestOmitPositions extends LuceneTestCase { // Verifies no *.prx exists when all fields omit term positions: public void testNoPrxFile() throws Throwable 
{ Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java index 121ab321cda..b90c16747f7 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java @@ -61,7 +61,7 @@ public class TestOmitTf extends LuceneTestCase { // omitTermFreqAndPositions bit in the FieldInfo public void testOmitTermFreqAndPositions() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); Document d = new Document(); @@ -106,7 +106,7 @@ public class TestOmitTf extends LuceneTestCase { // omitTermFreqAndPositions for the same field works public void testMixedMerge() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). @@ -159,7 +159,7 @@ public class TestOmitTf extends LuceneTestCase { // field, public void testMixedRAM() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). @@ -208,7 +208,7 @@ public class TestOmitTf extends LuceneTestCase { // Verifies no *.prx exists when all fields omit term freq: public void testNoPrxFile() throws Throwable { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); @@ -247,7 +247,7 @@ public class TestOmitTf extends LuceneTestCase { // Test scores with one field with Term Freqs and one without, otherwise with equal content public void testBasic() throws Exception { Directory dir = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). 
@@ -427,8 +427,8 @@ public class TestOmitTf extends LuceneTestCase { /** test that when freqs are omitted, that totalTermFreq and sumTotalTermFreq are -1 */ public void testStats() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_UNSTORED); ft.setIndexOptions(IndexOptions.DOCS_ONLY); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java index 885a19922a9..981bdfb8e3f 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java @@ -34,8 +34,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { private Directory dir, dir1, dir2; public void testQueries() throws Exception { - single = single(random); - parallel = parallel(random); + single = single(random()); + parallel = parallel(random()); queryTest(new TermQuery(new Term("f1", "v1"))); queryTest(new TermQuery(new Term("f1", "v2"))); @@ -59,8 +59,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testFieldNames() throws Exception { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)), SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2))); FieldInfos fieldInfos = pr.getFieldInfos(); @@ -75,8 +75,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testRefCounts1() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); AtomicReader ir1, ir2; // close subreaders, ParallelReader will not change refCounts, but close on its own close ParallelAtomicReader pr = new ParallelAtomicReader(ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)), @@ -93,8 +93,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testRefCounts2() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)); AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)); // don't close subreaders, so ParallelReader will increment refcounts @@ -115,11 +115,11 @@ public class TestParallelAtomicReader extends LuceneTestCase { public void testIncompatibleIndexes() throws IOException { // two documents: - Directory dir1 = getDir1(random); + Directory dir1 = getDir1(random()); // one document only: Directory dir2 = newDirectory(); - IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document d3 = new Document(); d3.add(newField("f3", "v1", TextField.TYPE_STORED)); @@ -137,7 +137,7 @@ public class TestParallelAtomicReader 
extends LuceneTestCase { } try { - new ParallelAtomicReader(random.nextBoolean(), + new ParallelAtomicReader(random().nextBoolean(), new AtomicReader[] {ir1, ir2}, new AtomicReader[] {ir1, ir2}); fail("didn't get expected exception: indexes don't have same number of documents"); @@ -154,8 +154,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testIgnoreStoredFields() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)); AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java index 359474287e8..8d49ec26b55 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java @@ -34,8 +34,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { private Directory dir, dir1, dir2; public void testQueries() throws Exception { - single = single(random, false); - parallel = parallel(random, false); + single = single(random(), false); + parallel = parallel(random(), false); queries(); @@ -47,8 +47,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testQueriesCompositeComposite() throws Exception { - single = single(random, true); - parallel = parallel(random, true); + single = single(random(), true); + parallel = parallel(random(), true); queries(); @@ -76,8 +76,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testRefCounts1() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); DirectoryReader ir1, ir2; // close subreaders, ParallelReader will not change refCounts, but close on its own close ParallelCompositeReader pr = new ParallelCompositeReader(ir1 = DirectoryReader.open(dir1), @@ -93,8 +93,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testRefCounts2() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); DirectoryReader ir1 = DirectoryReader.open(dir1); DirectoryReader ir2 = DirectoryReader.open(dir2); @@ -116,11 +116,11 @@ public class TestParallelCompositeReader extends LuceneTestCase { public void testIncompatibleIndexes1() throws IOException { // two documents: - Directory dir1 = getDir1(random); + Directory dir1 = getDir1(random()); // one document only: Directory dir2 = newDirectory(); - IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document d3 = new Document(); d3.add(newField("f3", "v1", TextField.TYPE_STORED)); @@ -136,7 +136,7 @@ public class TestParallelCompositeReader extends LuceneTestCase { // expected exception } try { - new ParallelCompositeReader(random.nextBoolean(), ir1, ir2); + new ParallelCompositeReader(random().nextBoolean(), ir1, ir2); fail("didn't get expected exception: indexes don't have same number of documents"); } catch 
(IllegalArgumentException e) { // expected exception @@ -152,8 +152,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testIncompatibleIndexes2() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getInvalidStructuredDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getInvalidStructuredDir2(random()); DirectoryReader ir1 = DirectoryReader.open(dir1), ir2 = DirectoryReader.open(dir2); @@ -165,7 +165,7 @@ public class TestParallelCompositeReader extends LuceneTestCase { // expected exception } try { - new ParallelCompositeReader(random.nextBoolean(), readers, readers); + new ParallelCompositeReader(random().nextBoolean(), readers, readers); fail("didn't get expected exception: indexes don't have same subreader structure"); } catch (IllegalArgumentException e) { // expected exception @@ -181,8 +181,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testIncompatibleIndexes3() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); CompositeReader ir1 = new MultiReader(DirectoryReader.open(dir1), SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1))), ir2 = new MultiReader(DirectoryReader.open(dir2), DirectoryReader.open(dir2)); @@ -194,7 +194,7 @@ public class TestParallelCompositeReader extends LuceneTestCase { // expected exception } try { - new ParallelCompositeReader(random.nextBoolean(), readers, readers); + new ParallelCompositeReader(random().nextBoolean(), readers, readers); fail("didn't get expected exception: indexes don't have same subreader structure"); } catch (IllegalArgumentException e) { // expected exception @@ -210,8 +210,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testIgnoreStoredFields() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(random()); + Directory dir2 = getDir2(random()); CompositeReader ir1 = DirectoryReader.open(dir1); CompositeReader ir2 = DirectoryReader.open(dir2); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java index 0666580a2db..fd61d190339 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java @@ -42,14 +42,14 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { */ public void testEmptyIndex() throws IOException { Directory rd1 = newDirectory(); - IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); iw.close(); // create a copy: Directory rd2 = newDirectory(rd1); Directory rdOut = newDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); ParallelAtomicReader apr = new ParallelAtomicReader( SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd1)), @@ -89,7 +89,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { public void testEmptyIndexWithVectors() throws IOException { 
Directory rd1 = newDirectory(); { - IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); Field idField = newField("id", "", TextField.TYPE_UNSTORED); doc.add(idField); @@ -103,7 +103,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { iw.addDocument(doc); iw.close(); - IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMergePolicy(NoMergePolicy.COMPOUND_FILES); IndexWriter writer = new IndexWriter(rd1, dontMergeConfig); @@ -114,14 +114,14 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { assertEquals(1, ir.numDocs()); ir.close(); - iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); iw.forceMerge(1); iw.close(); } Directory rd2 = newDirectory(); { - IndexWriter iw = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); iw.addDocument(doc); iw.close(); @@ -129,7 +129,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { Directory rdOut = newDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); final DirectoryReader reader1, reader2; ParallelAtomicReader pr = new ParallelAtomicReader( SlowCompositeReaderWrapper.wrap(reader1 = DirectoryReader.open(rd1)), diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java index a0f3599b62d..631fbedf226 100755 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java @@ -40,7 +40,8 @@ public class TestParallelTermEnum extends LuceneTestCase { super.setUp(); Document doc; rd1 = newDirectory(); - IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); doc = new Document(); doc.add(newField("field1", "the quick brown fox jumps", TextField.TYPE_STORED)); @@ -49,7 +50,8 @@ public class TestParallelTermEnum extends LuceneTestCase { iw1.close(); rd2 = newDirectory(); - IndexWriter iw2 = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw2 = new IndexWriter(rd2, newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); doc = new Document(); doc.add(newField("field1", "the fox jumps over the lazy dog", TextField.TYPE_STORED)); @@ -79,7 +81,7 @@ public class TestParallelTermEnum extends LuceneTestCase { BytesRef b = te.next(); assertNotNull(b); assertEquals(t, b.utf8ToString()); - DocsEnum td = _TestUtil.docs(random, te, liveDocs, 
null, false); + DocsEnum td = _TestUtil.docs(random(), te, liveDocs, null, false); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocIdSetIterator.NO_MORE_DOCS); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java index 3a7fd13ce36..8336bdc0e19 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java @@ -221,25 +221,25 @@ public class TestPayloadProcessorProvider extends LuceneTestCase { @Test public void testAddIndexes() throws Exception { // addIndexes - single commit in each - doTest(random, true, 0, false); + doTest(random(), true, 0, false); // addIndexes - multiple commits in each - doTest(random, true, 0, true); + doTest(random(), true, 0, true); } @Test public void testAddIndexesIntoExisting() throws Exception { // addIndexes - single commit in each - doTest(random, false, NUM_DOCS, false); + doTest(random(), false, NUM_DOCS, false); // addIndexes - multiple commits in each - doTest(random, false, NUM_DOCS, true); + doTest(random(), false, NUM_DOCS, true); } @Test public void testRegularMerges() throws Exception { Directory dir = newDirectory(); - populateDocs(random, dir, true); + populateDocs(random(), dir, true); verifyPayloadExists(dir, "p", new BytesRef("p1"), NUM_DOCS); verifyPayloadExists(dir, "p", new BytesRef("p2"), NUM_DOCS); @@ -247,7 +247,7 @@ public class TestPayloadProcessorProvider extends LuceneTestCase { // won't get processed. Map processors = new HashMap(); processors.put(dir, new PerTermPayloadProcessor()); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors)); writer.forceMerge(1); writer.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java index c34410d68b8..9a74dc49e0a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java @@ -243,7 +243,7 @@ public class TestPayloads extends LuceneTestCase { // handle a caller that mucks with the // returned payload: if (rarely()) { - br.bytes = new byte[random.nextInt(5)]; + br.bytes = new byte[random().nextInt(5)]; } br.length = 0; br.offset = 0; @@ -351,7 +351,7 @@ public class TestPayloads extends LuceneTestCase { static final Charset utf8 = Charset.forName("UTF-8"); private void generateRandomData(byte[] data) { // this test needs the random data to be valid unicode - String s = _TestUtil.randomFixedByteLengthUnicodeString(random, data.length); + String s = _TestUtil.randomFixedByteLengthUnicodeString(random(), data.length); byte b[] = s.getBytes(utf8); assert b.length == data.length; System.arraycopy(b, 0, data, 0, b.length); @@ -503,7 +503,7 @@ public class TestPayloads extends LuceneTestCase { Directory dir = newDirectory(); final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); final String field = 
"test"; Thread[] ingesters = new Thread[numThreads]; @@ -612,15 +612,15 @@ public class TestPayloads extends LuceneTestCase { public void testAcrossFields() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.WHITESPACE, true)); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true)); Document doc = new Document(); doc.add(new Field("hasMaybepayload", "here we go", TextField.TYPE_STORED)); writer.addDocument(doc); writer.close(); - writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.WHITESPACE, true)); + writer = new RandomIndexWriter(random(), dir, + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true)); doc = new Document(); doc.add(new Field("hasMaybepayload2", "here we go", TextField.TYPE_STORED)); writer.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java index 889679d5ddc..4e85e223d33 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java @@ -37,9 +37,9 @@ import org.apache.lucene.util._TestUtil; public class TestPerSegmentDeletes extends LuceneTestCase { public void testDeletes1() throws Exception { //IndexWriter.debug2 = System.out; - Directory dir = new MockDirectoryWrapper(new Random(random.nextLong()), new RAMDirectory()); + Directory dir = new MockDirectoryWrapper(new Random(random().nextLong()), new RAMDirectory()); IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(random())); iwc.setMergeScheduler(new SerialMergeScheduler()); iwc.setMaxBufferedDocs(5000); iwc.setRAMBufferSizeMB(100); @@ -220,13 +220,13 @@ public class TestPerSegmentDeletes extends LuceneTestCase { } } - public static int[] toDocsArray(Term term, Bits bits, IndexReader reader) + public int[] toDocsArray(Term term, Bits bits, IndexReader reader) throws IOException { Fields fields = MultiFields.getFields(reader); Terms cterms = fields.terms(term.field); TermsEnum ctermsEnum = cterms.iterator(null); if (ctermsEnum.seekExact(new BytesRef(term.text()), false)) { - DocsEnum docsEnum = _TestUtil.docs(random, ctermsEnum, bits, null, false); + DocsEnum docsEnum = _TestUtil.docs(random(), ctermsEnum, bits, null, false); return toArray(docsEnum); } return null; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java index b18acf20efb..db2f2563b0e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java @@ -79,7 +79,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo int numSnapshots = 3; Directory dir = newDirectory(); PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), psdp)); prepareIndexAndSnapshots(psdp, writer, numSnapshots, "snapshot"); writer.close(); psdp.close(); @@ -88,7 +88,7 @@ public class TestPersistentSnapshotDeletionPolicy 
extends TestSnapshotDeletionPo psdp = new PersistentSnapshotDeletionPolicy( new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND, TEST_VERSION_CURRENT); - new IndexWriter(dir, getConfig(random, psdp)).close(); + new IndexWriter(dir, getConfig(random(), psdp)).close(); assertSnapshotExists(dir, psdp, numSnapshots); assertEquals(numSnapshots, psdp.getSnapshots().size()); @@ -104,7 +104,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo @Test public void testInvalidSnapshotInfos() throws Exception { // Add the correct number of documents (1), but without snapshot information - IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random, null)); + IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random(), null)); writer.addDocument(new Document()); writer.close(); try { @@ -119,7 +119,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo @Test public void testNoSnapshotInfos() throws Exception { // Initialize an empty index in snapshotDir - PSDP should initialize successfully. - new IndexWriter(snapshotDir, getConfig(random, null)).close(); + new IndexWriter(snapshotDir, getConfig(random(), null)).close(); new PersistentSnapshotDeletionPolicy( new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND, TEST_VERSION_CURRENT).close(); @@ -128,7 +128,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo @Test(expected=IllegalStateException.class) public void testTooManySnapshotInfos() throws Exception { // Write two documents to the snapshots directory - illegal. - IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random, null)); + IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random(), null)); writer.addDocument(new Document()); writer.addDocument(new Document()); writer.close(); @@ -143,7 +143,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo public void testSnapshotRelease() throws Exception { Directory dir = newDirectory(); PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), psdp)); prepareIndexAndSnapshots(psdp, writer, 1, "snapshot"); writer.close(); @@ -166,7 +166,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo int numSnapshots = 1; Directory dir = newDirectory(); PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), psdp)); prepareIndexAndSnapshots(psdp, writer, numSnapshots, "snapshot"); writer.close(); dir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java index e6afa2d7e27..7e76665b661 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java @@ -53,11 +53,11 @@ public class TestPostingsOffsets extends LuceneTestCase { super.setUp(); // Currently only SimpleText and Lucene40 can index offsets into postings: assumeTrue("codec does not support offsets", Codec.getDefault().getName().equals("SimpleText") || Codec.getDefault().getName().equals("Lucene40")); - iwc = 
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); if (Codec.getDefault().getName().equals("Lucene40")) { // pulsing etc are not implemented - if (random.nextBoolean()) { + if (random().nextBoolean()) { iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat())); } else { iwc.setCodec(_TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat())); @@ -68,7 +68,7 @@ public class TestPostingsOffsets extends LuceneTestCase { public void testBasic() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir, iwc); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_UNSTORED); @@ -129,25 +129,25 @@ public class TestPostingsOffsets extends LuceneTestCase { public void doTestNumbers(boolean withPayloads) throws Exception { Directory dir = newDirectory(); - Analyzer analyzer = withPayloads ? new MockPayloadAnalyzer() : new MockAnalyzer(random); + Analyzer analyzer = withPayloads ? new MockPayloadAnalyzer() : new MockAnalyzer(random()); iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); if (Codec.getDefault().getName().equals("Lucene40")) { // pulsing etc are not implemented - if (random.nextBoolean()) { + if (random().nextBoolean()) { iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat())); } else { iwc.setCodec(_TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat())); } } iwc.setMergePolicy(newLogMergePolicy()); // will rely on docids a bit for skipping - RandomIndexWriter w = new RandomIndexWriter(random, dir, iwc); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); FieldType ft = new FieldType(TextField.TYPE_STORED); ft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); - if (random.nextBoolean()) { + if (random().nextBoolean()) { ft.setStoreTermVectors(true); - ft.setStoreTermVectorOffsets(random.nextBoolean()); - ft.setStoreTermVectorPositions(random.nextBoolean()); + ft.setStoreTermVectorOffsets(random().nextBoolean()); + ft.setStoreTermVectorPositions(random().nextBoolean()); } int numDocs = atLeast(500); @@ -192,7 +192,7 @@ public class TestPostingsOffsets extends LuceneTestCase { int numSkippingTests = atLeast(50); for (int j = 0; j < numSkippingTests; j++) { - int num = _TestUtil.nextInt(random, 100, Math.min(numDocs-1, 999)); + int num = _TestUtil.nextInt(random(), 100, Math.min(numDocs-1, 999)); DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"), true); int doc = dp.advance(num); assertEquals(num, doc); @@ -232,7 +232,7 @@ public class TestPostingsOffsets extends LuceneTestCase { final Map<String,Map<Integer,List<Token>>> actualTokens = new HashMap<String,Map<Integer,List<Token>>>(); Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir, iwc); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); final int numDocs = atLeast(20); //final int numDocs = atLeast(5); @@ -242,10 +242,10 @@ public class TestPostingsOffsets extends LuceneTestCase { // TODO: randomize what IndexOptions we use; also test // changing this up in one IW buffered segment...: ft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); - if (random.nextBoolean()) { + if (random().nextBoolean()) { ft.setStoreTermVectors(true); - ft.setStoreTermVectorOffsets(random.nextBoolean()); -
ft.setStoreTermVectorPositions(random.nextBoolean()); + ft.setStoreTermVectorOffsets(random().nextBoolean()); + ft.setStoreTermVectorPositions(random().nextBoolean()); } for(int docCount=0;docCount= SIZE && random.nextInt(50) == 17) { + if (docIter >= SIZE && random().nextInt(50) == 17) { if (r != null) { r.close(); } - final boolean applyDeletions = random.nextBoolean(); + final boolean applyDeletions = random().nextBoolean(); r = w.getReader(applyDeletions); assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE); } @@ -88,12 +91,12 @@ public class TestRollingUpdates extends LuceneTestCase { public void testUpdateSameDoc() throws Exception { final Directory dir = newDirectory(); - final LineFileDocs docs = new LineFileDocs(random); + final LineFileDocs docs = new LineFileDocs(random()); for (int r = 0; r < 3; r++) { final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); + TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2)); final int numUpdates = atLeast(20); - int numThreads = _TestUtil.nextInt(random, 2, 6); + int numThreads = _TestUtil.nextInt(random(), 2, 6); IndexingThread[] threads = new IndexingThread[numThreads]; for (int i = 0; i < numThreads; i++) { threads[i] = new IndexingThread(docs, w, numUpdates); @@ -133,7 +136,7 @@ public class TestRollingUpdates extends LuceneTestCase { Document doc = new Document();// docs.nextDoc(); doc.add(newField("id", "test", StringField.TYPE_UNSTORED)); writer.updateDocument(new Term("id", "test"), doc); - if (random.nextInt(3) == 0) { + if (random().nextInt(3) == 0) { if (open == null) { open = IndexReader.open(writer, true); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java index 40a52b77a14..58d6365791a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java @@ -36,7 +36,7 @@ public class TestSameTokenSamePosition extends LuceneTestCase { */ public void test() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter riw = new RandomIndexWriter(random, dir); + RandomIndexWriter riw = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(new TextField("eng", new BugReproTokenStream())); riw.addDocument(doc); @@ -49,7 +49,7 @@ public class TestSameTokenSamePosition extends LuceneTestCase { */ public void testMoreDocs() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter riw = new RandomIndexWriter(random, dir); + RandomIndexWriter riw = new RandomIndexWriter(random(), dir); for (int i = 0; i < 100; i++) { Document doc = new Document(); doc.add(new TextField("eng", new BugReproTokenStream())); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java index 2ee7d1f3b19..88f61aaa19c 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java @@ -49,11 +49,11 @@ public class TestSegmentMerger extends LuceneTestCase { merge1Dir = newDirectory(); merge2Dir = newDirectory(); DocHelper.setupDoc(doc1); - SegmentInfo info1 = DocHelper.writeDoc(random, merge1Dir, doc1); + SegmentInfo info1 = 
DocHelper.writeDoc(random(), merge1Dir, doc1); DocHelper.setupDoc(doc2); - SegmentInfo info2 = DocHelper.writeDoc(random, merge2Dir, doc2); - reader1 = new SegmentReader(info1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); - reader2 = new SegmentReader(info2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentInfo info2 = DocHelper.writeDoc(random(), merge2Dir, doc2); + reader1 = new SegmentReader(info1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); + reader2 = new SegmentReader(info2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); } @Override @@ -76,7 +76,7 @@ public class TestSegmentMerger extends LuceneTestCase { public void testMerge() throws IOException { final Codec codec = Codec.getDefault(); - SegmentMerger merger = new SegmentMerger(InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, MergeState.CheckAbort.NONE, null, new FieldInfos(new FieldInfos.FieldNumberBiMap()), codec, newIOContext(random)); + SegmentMerger merger = new SegmentMerger(InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, MergeState.CheckAbort.NONE, null, new FieldInfos(new FieldInfos.FieldNumberBiMap()), codec, newIOContext(random())); merger.add(reader1); merger.add(reader2); MergeState mergeState = merger.merge(); @@ -86,7 +86,7 @@ public class TestSegmentMerger extends LuceneTestCase { //Should be able to open a new SegmentReader against the new directory SegmentReader mergedReader = new SegmentReader(new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, codec, fieldInfos), - DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); assertTrue(mergedReader != null); assertTrue(mergedReader.numDocs() == 2); Document newDoc1 = mergedReader.document(0); @@ -97,7 +97,7 @@ public class TestSegmentMerger extends LuceneTestCase { assertTrue(newDoc2 != null); assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); - DocsEnum termDocs = _TestUtil.docs(random, mergedReader, + DocsEnum termDocs = _TestUtil.docs(random(), mergedReader, DocHelper.TEXT_FIELD_2_KEY, new BytesRef("field"), MultiFields.getLiveDocs(mergedReader), diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java index 870f5fe3698..f537ad23e44 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java @@ -41,7 +41,7 @@ public class TestSegmentReader extends LuceneTestCase { super.setUp(); dir = newDirectory(); DocHelper.setupDoc(testDoc); - SegmentInfo info = DocHelper.writeDoc(random, dir, testDoc); + SegmentInfo info = DocHelper.writeDoc(random(), dir, testDoc); reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, IOContext.READ); } @@ -128,7 +128,7 @@ public class TestSegmentReader extends LuceneTestCase { } } - DocsEnum termDocs = _TestUtil.docs(random, reader, + DocsEnum termDocs = _TestUtil.docs(random(), reader, DocHelper.TEXT_FIELD_1_KEY, new BytesRef("field"), MultiFields.getLiveDocs(reader), @@ -136,7 +136,7 @@ public class TestSegmentReader extends LuceneTestCase { false); assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); - termDocs = _TestUtil.docs(random, reader, + termDocs = _TestUtil.docs(random(), 
reader, DocHelper.NO_NORMS_KEY, new BytesRef(DocHelper.NO_NORMS_TEXT), MultiFields.getLiveDocs(reader), diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java index f5cc85a26aa..4d5d9fb5d8e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java @@ -38,7 +38,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { super.setUp(); dir = newDirectory(); DocHelper.setupDoc(testDoc); - info = DocHelper.writeDoc(random, dir, testDoc); + info = DocHelper.writeDoc(random(), dir, testDoc); } @Override @@ -57,13 +57,13 @@ public class TestSegmentTermDocs extends LuceneTestCase { public void testTermDocs(int indexDivisor) throws IOException { //After adding the document, we should be able to read it back in - SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random())); assertTrue(reader != null); assertEquals(indexDivisor, reader.getTermInfosIndexDivisor()); TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null); terms.seekCeil(new BytesRef("field")); - DocsEnum termDocs = _TestUtil.docs(random, terms, reader.getLiveDocs(), null, true); + DocsEnum termDocs = _TestUtil.docs(random(), terms, reader.getLiveDocs(), null, true); if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int docId = termDocs.docID(); assertTrue(docId == 0); @@ -80,9 +80,9 @@ public class TestSegmentTermDocs extends LuceneTestCase { public void testBadSeek(int indexDivisor) throws IOException { { //After adding the document, we should be able to read it back in - SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random())); assertTrue(reader != null); - DocsEnum termDocs = _TestUtil.docs(random, reader, + DocsEnum termDocs = _TestUtil.docs(random(), reader, "textField2", new BytesRef("bad"), reader.getLiveDocs(), @@ -94,9 +94,9 @@ public class TestSegmentTermDocs extends LuceneTestCase { } { //After adding the document, we should be able to read it back in - SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random())); assertTrue(reader != null); - DocsEnum termDocs = _TestUtil.docs(random, reader, + DocsEnum termDocs = _TestUtil.docs(random(), reader, "junk", new BytesRef("bad"), reader.getLiveDocs(), @@ -113,7 +113,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { public void testSkipTo(int indexDivisor) throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); Term ta = new Term("content","aaa"); for(int i = 0; i < 10; i++) @@ -133,7 +133,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { IndexReader reader = IndexReader.open(dir, indexDivisor); - DocsEnum tdocs = _TestUtil.docs(random, reader, + DocsEnum tdocs = _TestUtil.docs(random(), reader, ta.field(), new BytesRef(ta.text()), MultiFields.getLiveDocs(reader), @@ -158,7 +158,7 @@ public 
class TestSegmentTermDocs extends LuceneTestCase { assertFalse(tdocs.advance(10) != DocIdSetIterator.NO_MORE_DOCS); // without next - tdocs = _TestUtil.docs(random, reader, + tdocs = _TestUtil.docs(random(), reader, ta.field(), new BytesRef(ta.text()), MultiFields.getLiveDocs(reader), @@ -176,7 +176,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { // exactly skipInterval documents and therefore with optimization // with next - tdocs = _TestUtil.docs(random, reader, + tdocs = _TestUtil.docs(random(), reader, tb.field(), new BytesRef(tb.text()), MultiFields.getLiveDocs(reader), @@ -200,7 +200,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { assertFalse(tdocs.advance(26) != DocIdSetIterator.NO_MORE_DOCS); // without next - tdocs = _TestUtil.docs(random, reader, + tdocs = _TestUtil.docs(random(), reader, tb.field(), new BytesRef(tb.text()), MultiFields.getLiveDocs(reader), @@ -220,7 +220,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { // much more than skipInterval documents and therefore with optimization // with next - tdocs = _TestUtil.docs(random, reader, + tdocs = _TestUtil.docs(random(), reader, tc.field(), new BytesRef(tc.text()), MultiFields.getLiveDocs(reader), @@ -246,7 +246,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { assertFalse(tdocs.advance(76) != DocIdSetIterator.NO_MORE_DOCS); //without next - tdocs = _TestUtil.docs(random, reader, + tdocs = _TestUtil.docs(random(), reader, tc.field(), new BytesRef(tc.text()), MultiFields.getLiveDocs(reader), @@ -271,7 +271,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { public void testIndexDivisor() throws IOException { testDoc = new Document(); DocHelper.setupDoc(testDoc); - DocHelper.writeDoc(random, dir, testDoc); + DocHelper.writeDoc(random(), dir, testDoc); testTermDocs(2); testBadSeek(2); testSkipTo(2); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java index 259e5d69863..c2b6d32ec9b 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java @@ -49,7 +49,7 @@ public class TestSegmentTermEnum extends LuceneTestCase { public void testTermEnum() throws IOException { IndexWriter writer = null; - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); // ADD 100 documents with term : aaa // add 100 documents with terms: aaa bbb @@ -65,7 +65,7 @@ public class TestSegmentTermEnum extends LuceneTestCase { verifyDocFreq(); // merge segments - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); writer.close(); @@ -75,7 +75,7 @@ public class TestSegmentTermEnum extends LuceneTestCase { public void testPrevTermAtEnd() throws IOException { - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(_TestUtil.alwaysPostingsFormat(new 
Lucene40PostingsFormat()))); addDoc(writer, "aaa bbb"); writer.close(); SegmentReader reader = getOnlySegmentReader(IndexReader.open(dir)); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java index 1e9f6d56a81..7d0ac66800c 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java @@ -92,7 +92,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { @Test public void testSnapshotDeletionPolicy() throws Exception { Directory fsDir = newDirectory(); - runTest(random, fsDir); + runTest(random(), fsDir); fsDir.close(); } @@ -206,7 +206,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { byte[] buffer = new byte[4096]; private void readFile(Directory dir, String name) throws Exception { - IndexInput input = dir.openInput(name, newIOContext(random)); + IndexInput input = dir.openInput(name, newIOContext(random())); try { long size = dir.fileLength(name); long bytesLeft = size; @@ -238,7 +238,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { // Create 3 snapshots: snapshot0, snapshot1, snapshot2 Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), sdp)); prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot"); writer.close(); @@ -249,7 +249,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { // open a new IndexWriter w/ no snapshots to keep and assert that all snapshots are gone. sdp = getDeletionPolicy(); - writer = new IndexWriter(dir, getConfig(random, sdp)); + writer = new IndexWriter(dir, getConfig(random(), sdp)); writer.deleteUnusedFiles(); writer.close(); assertEquals("no snapshots should exist", 1, DirectoryReader.listCommits(dir).size()); @@ -269,7 +269,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { public void testMultiThreadedSnapshotting() throws Exception { Directory dir = newDirectory(); final SnapshotDeletionPolicy sdp = getDeletionPolicy(); - final IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp)); + final IndexWriter writer = new IndexWriter(dir, getConfig(random(), sdp)); Thread[] threads = new Thread[10]; for (int i = 0; i < threads.length; i++) { @@ -314,12 +314,12 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { int numSnapshots = 2; Directory dir = newDirectory(); SnapshotDeletionPolicy sdp = getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), sdp)); prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot"); writer.close(); // now open the writer on "snapshot0" - make sure it succeeds - writer = new IndexWriter(dir, getConfig(random, sdp).setIndexCommit(sdp.getSnapshot("snapshot0"))); + writer = new IndexWriter(dir, getConfig(random(), sdp).setIndexCommit(sdp.getSnapshot("snapshot0"))); // this does the actual rollback writer.commit(); writer.deleteUnusedFiles(); @@ -336,7 +336,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { public void testReleaseSnapshot() throws Exception { Directory dir = newDirectory(); SnapshotDeletionPolicy sdp = getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp)); + IndexWriter writer = new IndexWriter(dir, 
getConfig(random(), sdp)); prepareIndexAndSnapshots(sdp, writer, 1, "snapshot"); // Create another commit - we must do that, because otherwise the "snapshot" @@ -368,13 +368,13 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { int numSnapshots = 3; Directory dir = newDirectory(); SnapshotDeletionPolicy sdp = getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), sdp)); prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot"); writer.close(); // Make a new policy and initialize with snapshots. sdp = getDeletionPolicy(sdp.getSnapshots()); - writer = new IndexWriter(dir, getConfig(random, sdp)); + writer = new IndexWriter(dir, getConfig(random(), sdp)); // attempt to delete unused files - the snapshotted files should not be deleted writer.deleteUnusedFiles(); writer.close(); @@ -386,7 +386,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { public void testSnapshotLastCommitTwice() throws Exception { Directory dir = newDirectory(); SnapshotDeletionPolicy sdp = getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), sdp)); writer.addDocument(new Document()); writer.commit(); @@ -415,7 +415,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { // on onInit(). Directory dir = newDirectory(); SnapshotDeletionPolicy sdp = getDeletionPolicy(); - IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp)); + IndexWriter writer = new IndexWriter(dir, getConfig(random(), sdp)); writer.addDocument(new Document()); writer.commit(); IndexCommit ic = sdp.snapshot("s1"); @@ -426,14 +426,14 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { // open a new writer w/ KeepOnlyLastCommit policy, so it will delete "s1" // commit. - new IndexWriter(dir, getConfig(random, null)).close(); + new IndexWriter(dir, getConfig(random(), null)).close(); assertFalse("snapshotted commit should not exist", dir.fileExists(ic.getSegmentsFileName())); // Now reinit SDP from the commits in the index - the snapshot id should not // exist anymore. 
sdp = getDeletionPolicy(sdp.getSnapshots()); - new IndexWriter(dir, getConfig(random, sdp)).close(); + new IndexWriter(dir, getConfig(random(), sdp)).close(); try { sdp.getSnapshot("s1"); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java index 33810da9bbd..9fd67723157 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java @@ -34,7 +34,7 @@ public class TestStressAdvance extends LuceneTestCase { System.out.println("\nTEST: iter=" + iter); } Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); final Set<Integer> aDocs = new HashSet<Integer>(); final Document doc = new Document(); final Field f = newField("field", "", StringField.TYPE_UNSTORED); @@ -43,7 +43,7 @@ doc.add(idField); int num = atLeast(4097); for(int id=0;id docs = indexRandom(5, 3, 100, dir1, maxThreadStates, doReaderPooling); - indexSerial(random, docs, dir2); // verifying verify // verifyEquals(dir1, dir1, "id"); @@ -101,16 +101,16 @@ public class TestStressIndexing2 extends LuceneTestCase { if (VERBOSE) { System.out.println("\n\nTEST: top iter=" + i); } - sameFieldOrder=random.nextBoolean(); - mergeFactor=random.nextInt(3)+2; - maxBufferedDocs=random.nextInt(3)+2; - int maxThreadStates = 1+random.nextInt(10); - boolean doReaderPooling = random.nextBoolean(); + sameFieldOrder=random().nextBoolean(); + mergeFactor=random().nextInt(3)+2; + maxBufferedDocs=random().nextInt(3)+2; + int maxThreadStates = 1+random().nextInt(10); + boolean doReaderPooling = random().nextBoolean(); seed++; - int nThreads=random.nextInt(5)+1; - int iter=random.nextInt(5)+1; - int range=random.nextInt(20)+1; + int nThreads=random().nextInt(5)+1; + int iter=random().nextInt(5)+1; + int range=random().nextInt(20)+1; Directory dir1 = newDirectory(); Directory dir2 = newDirectory(); if (VERBOSE) { @@ -120,7 +120,7 @@ public class TestStressIndexing2 extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: index serial"); } - indexSerial(random, docs, dir2); + indexSerial(random(), docs, dir2); if (VERBOSE) { System.out.println("TEST: verify"); } @@ -151,7 +151,7 @@ public class TestStressIndexing2 extends LuceneTestCase { public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map<String,Document> docs = new HashMap<String,Document>(); IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB( + TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB( 0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy())); w.commit(); LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy(); @@ -202,7 +202,7 @@ public class TestStressIndexing2 extends LuceneTestCase { boolean doReaderPooling) throws IOException, InterruptedException { Map<String,Document> docs = new HashMap<String,Document>(); IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE) + TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE) .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setIndexerThreadPool(new
ThreadAffinityDocumentsWriterThreadPool(maxThreadStates)) .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy(); @@ -266,13 +266,13 @@ public class TestStressIndexing2 extends LuceneTestCase { w.close(); } - public static void verifyEquals(Random r, DirectoryReader r1, Directory dir2, String idField) throws Throwable { + public void verifyEquals(Random r, DirectoryReader r1, Directory dir2, String idField) throws Throwable { DirectoryReader r2 = IndexReader.open(dir2); verifyEquals(r1, r2, idField); r2.close(); } - public static void verifyEquals(Directory dir1, Directory dir2, String idField) throws Throwable { + public void verifyEquals(Directory dir1, Directory dir2, String idField) throws Throwable { DirectoryReader r1 = IndexReader.open(dir1); DirectoryReader r2 = IndexReader.open(dir2); verifyEquals(r1, r2, idField); @@ -298,7 +298,7 @@ public class TestStressIndexing2 extends LuceneTestCase { } - public static void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField) throws Throwable { + public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField) throws Throwable { if (VERBOSE) { System.out.println("\nr1 docs:"); printDocs(r1); @@ -336,7 +336,7 @@ public class TestStressIndexing2 extends LuceneTestCase { // deleted docs): DocsEnum docs = null; while(termsEnum.next() != null) { - docs = _TestUtil.docs(random, termsEnum, null, docs, false); + docs = _TestUtil.docs(random(), termsEnum, null, docs, false); while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { fail("r1 is not empty but r2 is"); } @@ -356,9 +356,9 @@ public class TestStressIndexing2 extends LuceneTestCase { break; } - termDocs1 = _TestUtil.docs(random, termsEnum, liveDocs1, termDocs1, false); + termDocs1 = _TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, false); if (termsEnum2.seekExact(term, false)) { - termDocs2 = _TestUtil.docs(random, termsEnum2, liveDocs2, termDocs2, false); + termDocs2 = _TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, false); } else { termDocs2 = null; } @@ -417,7 +417,7 @@ public class TestStressIndexing2 extends LuceneTestCase { System.out.println(" pos=" + dpEnum.nextPosition()); } } else { - dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true); + dEnum = _TestUtil.docs(random(), termsEnum3, null, dEnum, true); assertNotNull(dEnum); assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); final int freq = dEnum.freq(); @@ -451,7 +451,7 @@ public class TestStressIndexing2 extends LuceneTestCase { System.out.println(" pos=" + dpEnum.nextPosition()); } } else { - dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true); + dEnum = _TestUtil.docs(random(), termsEnum3, null, dEnum, true); assertNotNull(dEnum); assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); final int freq = dEnum.freq(); @@ -508,7 +508,7 @@ public class TestStressIndexing2 extends LuceneTestCase { } //System.out.println("TEST: term1=" + term1); - docs1 = _TestUtil.docs(random, termsEnum1, liveDocs1, docs1, true); + docs1 = _TestUtil.docs(random(), termsEnum1, liveDocs1, docs1, true); while (docs1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int d = docs1.docID(); int f = docs1.freq(); @@ -542,7 +542,7 @@ public class TestStressIndexing2 extends LuceneTestCase { } //System.out.println("TEST: term1=" + term1); - docs2 = _TestUtil.docs(random, termsEnum2, liveDocs2, docs2, true); + docs2 = _TestUtil.docs(random(), termsEnum2, liveDocs2, docs2, true); while 
(docs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int d = r2r1[docs2.docID()]; int f = docs2.freq(); @@ -669,8 +669,8 @@ public class TestStressIndexing2 extends LuceneTestCase { assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum1.nextDoc()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum2.nextDoc()); } else { - dEnum1 = _TestUtil.docs(random, termsEnum1, null, dEnum1, true); - dEnum2 = _TestUtil.docs(random, termsEnum2, null, dEnum2, true); + dEnum1 = _TestUtil.docs(random(), termsEnum1, null, dEnum1, true); + dEnum2 = _TestUtil.docs(random(), termsEnum2, null, dEnum2, true); assertNotNull(dEnum1); assertNotNull(dEnum2); int docID1 = dEnum1.nextDoc(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java index a607e60c201..5e1d3560bad 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java @@ -66,20 +66,20 @@ public class TestStressNRT extends LuceneTestCase { public void test() throws Exception { // update variables - final int commitPercent = random.nextInt(20); - final int softCommitPercent = random.nextInt(100); // what percent of the commits are soft - final int deletePercent = random.nextInt(50); - final int deleteByQueryPercent = random.nextInt(25); + final int commitPercent = random().nextInt(20); + final int softCommitPercent = random().nextInt(100); // what percent of the commits are soft + final int deletePercent = random().nextInt(50); + final int deleteByQueryPercent = random().nextInt(25); final int ndocs = atLeast(50); - final int nWriteThreads = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5); - final int maxConcurrentCommits = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5); // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max + final int nWriteThreads = _TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5); + final int maxConcurrentCommits = _TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5); // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max - final boolean tombstones = random.nextBoolean(); + final boolean tombstones = random().nextBoolean(); // query variables final AtomicLong operations = new AtomicLong(atLeast(10000)); // number of query operations to perform in total - final int nReadThreads = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5); + final int nReadThreads = _TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 
10 : 5); initModel(ndocs); final FieldType storedOnlyType = new FieldType(); @@ -106,14 +106,14 @@ public class TestStressNRT extends LuceneTestCase { Directory dir = newDirectory(); - final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.setDoRandomForceMergeAssert(false); writer.commit(); reader = IndexReader.open(dir); for (int i=0; i acceptTerms = new HashSet(); final TreeSet sortedAcceptTerms = new TreeSet(); - final double keepPct = random.nextDouble(); + final double keepPct = random().nextDouble(); Automaton a; if (iter == 0) { if (VERBOSE) { @@ -256,7 +248,7 @@ public class TestTermsEnum extends LuceneTestCase { } for (String s : terms) { final String s2; - if (random.nextDouble() <= keepPct) { + if (random().nextDouble() <= keepPct) { s2 = s; } else { s2 = getRandomString(); @@ -288,7 +280,7 @@ public class TestTermsEnum extends LuceneTestCase { } for(int iter2=0;iter2<100;iter2++) { - final BytesRef startTerm = acceptTermsArray.length == 0 || random.nextBoolean() ? null : acceptTermsArray[random.nextInt(acceptTermsArray.length)]; + final BytesRef startTerm = acceptTermsArray.length == 0 || random().nextBoolean() ? null : acceptTermsArray[random().nextInt(acceptTermsArray.length)]; if (VERBOSE) { System.out.println("\nTEST: iter2=" + iter2 + " startTerm=" + (startTerm == null ? "" : startTerm.utf8ToString())); @@ -332,7 +324,7 @@ public class TestTermsEnum extends LuceneTestCase { } assertEquals(expected, actual); assertEquals(1, te.docFreq()); - docsEnum = _TestUtil.docs(random, te, null, docsEnum, false); + docsEnum = _TestUtil.docs(random(), te, null, docsEnum, false); final int docID = docsEnum.nextDoc(); assertTrue(docID != DocIdSetIterator.NO_MORE_DOCS); assertEquals(docIDToID[docID], termToID.get(expected).intValue()); @@ -356,13 +348,13 @@ public class TestTermsEnum extends LuceneTestCase { private IndexReader makeIndex(String... 
terms) throws Exception { d = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); /* iwc.setCodec(new StandardCodec(minTermsInBlock, maxTermsInBlock)); */ - final RandomIndexWriter w = new RandomIndexWriter(random, d, iwc); + final RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc); for(String term : terms) { Document doc = new Document(); Field f = newField(FIELD, term, StringField.TYPE_UNSTORED); @@ -502,7 +494,7 @@ public class TestTermsEnum extends LuceneTestCase { public void testZeroTerms() throws Exception { d = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, d); + final RandomIndexWriter w = new RandomIndexWriter(random(), d); Document doc = new Document(); doc.add(newField("field", "one two three", TextField.TYPE_UNSTORED)); doc = new Document(); @@ -525,18 +517,18 @@ public class TestTermsEnum extends LuceneTestCase { private String getRandomString() { //return _TestUtil.randomSimpleString(random); - return _TestUtil.randomRealisticUnicodeString(random); + return _TestUtil.randomRealisticUnicodeString(random()); } public void testRandomTerms() throws Exception { - final String[] terms = new String[_TestUtil.nextInt(random, 1, atLeast(1000))]; + final String[] terms = new String[_TestUtil.nextInt(random(), 1, atLeast(1000))]; final Set<String> seen = new HashSet<String>(); - final boolean allowEmptyString = random.nextBoolean(); + final boolean allowEmptyString = random().nextBoolean(); - if (random.nextInt(10) == 7 && terms.length > 2) { + if (random().nextInt(10) == 7 && terms.length > 2) { // Sometimes add a bunch of terms sharing a longish common prefix: - final int numTermsSamePrefix = random.nextInt(terms.length/2); + final int numTermsSamePrefix = random().nextInt(terms.length/2); if (numTermsSamePrefix > 0) { String prefix; while(true) { @@ -571,7 +563,7 @@ public class TestTermsEnum extends LuceneTestCase { // sugar private boolean seekExact(TermsEnum te, String term) throws IOException { - return te.seekExact(new BytesRef(term), random.nextBoolean()); + return te.seekExact(new BytesRef(term), random().nextBoolean()); } // sugar @@ -628,7 +620,7 @@ public class TestTermsEnum extends LuceneTestCase { final BytesRef t; int loc; final TermState termState; - if (random.nextInt(6) == 4) { + if (random().nextInt(6) == 4) { // pick term that doens't exist: t = getNonExistTerm(validTerms); termState = null; @@ -636,8 +628,8 @@ System.out.println("\nTEST: invalid term=" + t.utf8ToString()); } loc = Arrays.binarySearch(validTerms, t); - } else if (termStates.size() != 0 && random.nextInt(4) == 1) { - final TermAndState ts = termStates.get(random.nextInt(termStates.size())); + } else if (termStates.size() != 0 && random().nextInt(4) == 1) { + final TermAndState ts = termStates.get(random().nextInt(termStates.size())); t = ts.term; loc = Arrays.binarySearch(validTerms, t); assertTrue(loc >= 0); @@ -647,7 +639,7 @@ } } else { // pick valid term - loc = random.nextInt(validTerms.length); + loc = random().nextInt(validTerms.length); t = BytesRef.deepCopyOf(validTerms[loc]); termState = null; if (VERBOSE) { @@ -656,7 +648,7 @@ } // seekCeil or seekExact: - final boolean doSeekExact = random.nextBoolean(); + final boolean doSeekExact =
random().nextBoolean(); if (termState != null) { if (VERBOSE) { System.out.println(" seekExact termState"); } ... } else { ... if (VERBOSE) { System.out.println(" seekExact"); } - assertEquals(loc >= 0, te.seekExact(t, random.nextBoolean())); + assertEquals(loc >= 0, te.seekExact(t, random().nextBoolean())); } else { if (VERBOSE) { System.out.println(" seekCeil"); } - final TermsEnum.SeekStatus result = te.seekCeil(t, random.nextBoolean()); + final TermsEnum.SeekStatus result = te.seekCeil(t, random().nextBoolean()); if (VERBOSE) { System.out.println(" got " + result); } @@ -700,7 +692,7 @@ } // Do a bunch of next's after the seek - final int numNext = random.nextInt(validTerms.length); + final int numNext = random().nextInt(validTerms.length); for(int nextCount=0;nextCount matchedTerms = new ArrayList<BytesRef>(); for(BytesRef t : terms) { @@ -114,21 +114,21 @@ public class TestTermsEnum2 extends LuceneTestCase { /** seeks to every term accepted by some automata */ public void testSeeking() throws Exception { for (int i = 0; i < numIterations; i++) { - String reg = AutomatonTestUtil.randomRegexp(random); + String reg = AutomatonTestUtil.randomRegexp(random()); Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton(); TermsEnum te = MultiFields.getTerms(reader, "field").iterator(null); ArrayList<BytesRef> unsortedTerms = new ArrayList<BytesRef>(terms); - Collections.shuffle(unsortedTerms, random); + Collections.shuffle(unsortedTerms, random()); for (BytesRef term : unsortedTerms) { if (BasicOperations.run(automaton, term.utf8ToString())) { // term is accepted - if (random.nextBoolean()) { + if (random().nextBoolean()) { // seek exact - assertTrue(te.seekExact(term, random.nextBoolean())); + assertTrue(te.seekExact(term, random().nextBoolean())); } else { // seek ceil - assertEquals(SeekStatus.FOUND, te.seekCeil(term, random.nextBoolean())); + assertEquals(SeekStatus.FOUND, te.seekCeil(term, random().nextBoolean())); assertEquals(term, te.term()); } } @@ -142,14 +142,14 @@ public class TestTermsEnum2 extends LuceneTestCase { TermsEnum te = MultiFields.getTerms(reader, "field").iterator(null); for (BytesRef term : terms) { - int c = random.nextInt(3); + int c = random().nextInt(3); if (c == 0) { assertEquals(term, te.next()); } else if (c == 1) { - assertEquals(SeekStatus.FOUND, te.seekCeil(term, random.nextBoolean())); + assertEquals(SeekStatus.FOUND, te.seekCeil(term, random().nextBoolean())); assertEquals(term, te.term()); } else { - assertTrue(te.seekExact(term, random.nextBoolean())); + assertTrue(te.seekExact(term, random().nextBoolean())); } } } @@ -158,7 +158,7 @@ /** tests intersect: TODO start at a random term! 
*/ public void testIntersect() throws Exception { for (int i = 0; i < numIterations; i++) { - String reg = AutomatonTestUtil.randomRegexp(random); + String reg = AutomatonTestUtil.randomRegexp(random()); Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton(); CompiledAutomaton ca = new CompiledAutomaton(automaton, SpecialOperations.isFinite(automaton), false); TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java b/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java index 520791990d9..84be8d019b2 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java @@ -28,12 +28,13 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.util.English; import org.apache.lucene.util.LuceneTestCase; +import org.junit.BeforeClass; import java.util.Random; public class TestThreadedForceMerge extends LuceneTestCase { - - private static final Analyzer ANALYZER = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + + private static Analyzer ANALYZER; private final static int NUM_THREADS = 3; //private final static int NUM_THREADS = 5; @@ -44,6 +45,11 @@ public class TestThreadedForceMerge extends LuceneTestCase { private volatile boolean failed; + @BeforeClass + public static void setup() { + ANALYZER = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); + } + private void setFailed() { failed = true; } @@ -137,7 +143,7 @@ public class TestThreadedForceMerge extends LuceneTestCase { */ public void testThreadedForceMerge() throws Exception { Directory directory = newDirectory(); - runTest(random, directory); + runTest(random(), directory); directory.close(); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java index 6b61e1aa18f..e876d0b0085 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java @@ -29,7 +29,7 @@ public class TestTieredMergePolicy extends LuceneTestCase { public void testForceMergeDeletes() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); TieredMergePolicy tmp = newTieredMergePolicy(); conf.setMergePolicy(tmp); conf.setMaxBufferedDocs(4); @@ -72,7 +72,7 @@ public class TestTieredMergePolicy extends LuceneTestCase { System.out.println("TEST: iter=" + iter); } Directory dir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); conf.setMergeScheduler(new SerialMergeScheduler()); TieredMergePolicy tmp = newTieredMergePolicy(); conf.setMergePolicy(tmp); @@ -82,7 +82,7 @@ public class TestTieredMergePolicy extends LuceneTestCase { IndexWriter w = new IndexWriter(dir, conf); int maxCount = 0; - final int numDocs = _TestUtil.nextInt(random, 20, 100); + final int numDocs = _TestUtil.nextInt(random(), 20, 100); for(int i=0;i data = new HashMap(); data.put("index", "Rolled back to 1-"+id); @@ -127,7 +127,7 @@ public class TestTransactionRollback extends 
LuceneTestCase { dir = newDirectory(); //Build index, of records 1 to 100, committing after each batch of 10 IndexDeletionPolicy sdp=new KeepAllDeletionPolicy(); - IndexWriter w=new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(sdp)); + IndexWriter w=new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setIndexDeletionPolicy(sdp)); for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) { Document doc=new Document(); @@ -202,7 +202,7 @@ public class TestTransactionRollback extends LuceneTestCase { for(int i=0;i<2;i++) { // Unless you specify a prior commit point, rollback // should not work: - new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setIndexDeletionPolicy(new DeleteLastCommitPolicy())).close(); IndexReader r = IndexReader.open(dir); assertEquals(100, r.numDocs()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java b/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java index f48c423edf5..02834f4beec 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java @@ -37,7 +37,7 @@ public class TestTransactions extends LuceneTestCase { private class RandomFailure extends MockDirectoryWrapper.Failure { @Override public void eval(MockDirectoryWrapper dir) throws IOException { - if (TestTransactions.doFail && random.nextInt() % 10 <= 3) { + if (TestTransactions.doFail && random().nextInt() % 10 <= 3) { throw new IOException("now failing randomly but on purpose"); } } @@ -96,7 +96,7 @@ public class TestTransactions extends LuceneTestCase { IndexWriter writer1 = new IndexWriter( dir1, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(3). setMergeScheduler(new ConcurrentMergeScheduler()). setMergePolicy(newLogMergePolicy(2)) @@ -107,7 +107,7 @@ public class TestTransactions extends LuceneTestCase { // happen @ different times IndexWriter writer2 = new IndexWriter( dir2, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMaxBufferedDocs(2). setMergeScheduler(new ConcurrentMergeScheduler()). 
setMergePolicy(newLogMergePolicy(3)) @@ -152,7 +152,7 @@ public class TestTransactions extends LuceneTestCase { customType.setStoreTermVectors(true); for(int j=0; j<10; j++) { Document d = new Document(); - int n = random.nextInt(); + int n = random().nextInt(); d.add(newField("id", Integer.toString(nextID++), customType)); d.add(newField("contents", English.intToEnglish(n), TextField.TYPE_UNSTORED)); writer.addDocument(d); @@ -194,10 +194,10 @@ public class TestTransactions extends LuceneTestCase { } public void initIndex(Directory dir) throws Throwable { - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for(int j=0; j<7; j++) { Document d = new Document(); - int n = random.nextInt(); + int n = random().nextInt(); d.add(newField("contents", English.intToEnglish(n), TextField.TYPE_UNSTORED)); writer.addDocument(d); } @@ -206,8 +206,8 @@ public class TestTransactions extends LuceneTestCase { public void testTransactions() throws Throwable { // we cant use non-ramdir on windows, because this test needs to double-write. - MockDirectoryWrapper dir1 = new MockDirectoryWrapper(random, new RAMDirectory()); - MockDirectoryWrapper dir2 = new MockDirectoryWrapper(random, new RAMDirectory()); + MockDirectoryWrapper dir1 = new MockDirectoryWrapper(random(), new RAMDirectory()); + MockDirectoryWrapper dir2 = new MockDirectoryWrapper(random(), new RAMDirectory()); dir1.setPreventDoubleWrite(false); dir2.setPreventDoubleWrite(false); dir1.failOn(new RandomFailure()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java b/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java index d26b6d038a8..3137b95bc87 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java @@ -69,28 +69,28 @@ public class TestTypePromotion extends LuceneTestCase { throws CorruptIndexException, IOException { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); int num_1 = atLeast(200); int num_2 = atLeast(200); int num_3 = atLeast(200); long[] values = new long[num_1 + num_2 + num_3]; index(writer, - randomValueType(types, random), values, 0, num_1); + randomValueType(types, random()), values, 0, num_1); writer.commit(); index(writer, - randomValueType(types, random), values, num_1, num_2); + randomValueType(types, random()), values, num_1, num_2); writer.commit(); - if (random.nextInt(4) == 0) { + if (random().nextInt(4) == 0) { // once in a while use addIndexes writer.forceMerge(1); Directory dir_2 = newDirectory() ; IndexWriter writer_2 = new IndexWriter(dir_2, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); index(writer_2, - randomValueType(types, random), values, num_1 + num_2, num_3); + randomValueType(types, random()), values, num_1 + num_2, num_3); writer_2.commit(); writer_2.close(); if (rarely()) { @@ -104,7 +104,7 @@ public class TestTypePromotion extends LuceneTestCase { dir_2.close(); } else { index(writer, - randomValueType(types, random), values, num_1 + num_2, num_3); + randomValueType(types, random()), values, num_1 + num_2, num_3); } 
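The mechanical change running through all of these hunks is the same: reads of the protected static `random` field of LuceneTestCase become calls to a `random()` method. A static field is initialized once, when the class loads, on whatever thread happens to load it; a method can instead return the Random bound to the current test and thread, so every suite stays reproducible from its seed even when suites run on parallel slave JVMs. A minimal sketch of such an accessor, assuming a ThreadLocal-based context (the names RandomSource, MASTER_SEED and PER_THREAD are illustrative, not Lucene's actual plumbing):

import java.util.Random;

public final class RandomSource {
  // Hypothetical master seed; the real runner derives it from -Dtests.seed.
  private static final long MASTER_SEED = Long.getLong("tests.seed", 0xDEADBEEFL);

  // One Random per test thread, derived from the master seed, so parallel
  // slave JVMs never share or race on a single generator's state.
  private static final ThreadLocal<Random> PER_THREAD = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random(MASTER_SEED ^ Thread.currentThread().getId());
    }
  };

  public static Random random() {
    return PER_THREAD.get();
  }
}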
writer.forceMerge(1); @@ -214,28 +214,28 @@ public class TestTypePromotion extends LuceneTestCase { doc.add(new Field("id", i + "", TextField.TYPE_STORED)); switch (valueType) { case VAR_INTS: - values[i] = random.nextInt(); + values[i] = random().nextInt(); valField.setLongValue(values[i]); break; case FIXED_INTS_16: - values[i] = random.nextInt(Short.MAX_VALUE); + values[i] = random().nextInt(Short.MAX_VALUE); valField.setIntValue((short) values[i]); break; case FIXED_INTS_32: - values[i] = random.nextInt(); + values[i] = random().nextInt(); valField.setIntValue((int) values[i]); break; case FIXED_INTS_64: - values[i] = random.nextLong(); + values[i] = random().nextLong(); valField.setLongValue(values[i]); break; case FLOAT_64: - double nextDouble = random.nextDouble(); + double nextDouble = random().nextDouble(); values[i] = Double.doubleToRawLongBits(nextDouble); valField.setDoubleValue(nextDouble); break; case FLOAT_32: - final float nextFloat = random.nextFloat(); + final float nextFloat = random().nextFloat(); values[i] = Double.doubleToRawLongBits(nextFloat); valField.setFloatValue(nextFloat); break; @@ -246,7 +246,7 @@ public class TestTypePromotion extends LuceneTestCase { case BYTES_FIXED_DEREF: case BYTES_FIXED_SORTED: case BYTES_FIXED_STRAIGHT: - values[i] = random.nextLong(); + values[i] = random().nextLong(); byte bytes[] = new byte[8]; ByteArrayDataOutput out = new ByteArrayDataOutput(bytes, 0, 8); out.writeLong(values[i]); @@ -258,12 +258,12 @@ public class TestTypePromotion extends LuceneTestCase { byte lbytes[] = new byte[8]; ByteArrayDataOutput lout = new ByteArrayDataOutput(lbytes, 0, 8); final int len; - if (random.nextBoolean()) { - values[i] = random.nextInt(); + if (random().nextBoolean()) { + values[i] = random().nextInt(); lout.writeInt((int)values[i]); len = 4; } else { - values[i] = random.nextLong(); + values[i] = random().nextLong(); lout.writeLong(values[i]); len = 8; } @@ -275,7 +275,7 @@ public class TestTypePromotion extends LuceneTestCase { } doc.add(valField); writer.addDocument(doc); - if (random.nextInt(10) == 0) { + if (random().nextInt(10) == 0) { writer.commit(); } } @@ -300,26 +300,26 @@ public class TestTypePromotion extends LuceneTestCase { public void testMergeIncompatibleTypes() throws IOException { Directory dir = newDirectory(); - IndexWriterConfig writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); writerConfig.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES); // no merges until we are done with adding values IndexWriter writer = new IndexWriter(dir, writerConfig); int num_1 = atLeast(200); int num_2 = atLeast(200); long[] values = new long[num_1 + num_2]; index(writer, - randomValueType(INTEGERS, random), values, 0, num_1); + randomValueType(INTEGERS, random()), values, 0, num_1); writer.commit(); - if (random.nextInt(4) == 0) { + if (random().nextInt(4) == 0) { // once in a while use addIndexes Directory dir_2 = newDirectory() ; IndexWriter writer_2 = new IndexWriter(dir_2, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); index(writer_2, - randomValueType(random.nextBoolean() ? UNSORTED_BYTES : SORTED_BYTES, random), values, num_1, num_2); + randomValueType(random().nextBoolean() ? 
UNSORTED_BYTES : SORTED_BYTES, random()), values, num_1, num_2); writer_2.commit(); writer_2.close(); - if (random.nextBoolean()) { + if (random().nextBoolean()) { writer.addIndexes(dir_2); } else { // do a real merge here @@ -330,11 +330,11 @@ public class TestTypePromotion extends LuceneTestCase { dir_2.close(); } else { index(writer, - randomValueType(random.nextBoolean() ? UNSORTED_BYTES : SORTED_BYTES, random), values, num_1, num_2); + randomValueType(random().nextBoolean() ? UNSORTED_BYTES : SORTED_BYTES, random()), values, num_1, num_2); writer.commit(); } writer.close(); - writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); if (writerConfig.getMergePolicy() instanceof NoMergePolicy) { writerConfig.setMergePolicy(newLogMergePolicy()); // make sure we merge to one segment (merge everything together) } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java index 850b507d1bb..c5ed1748788 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java @@ -45,9 +45,9 @@ public class TestUniqueTermCount extends LuceneTestCase { super.setUp(); dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); config.setSimilarity(new TestSimilarity()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config); Document doc = new Document(); Field foo = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(foo); @@ -79,10 +79,10 @@ public class TestUniqueTermCount extends LuceneTestCase { private String addValue() { StringBuilder sb = new StringBuilder(); HashSet<String> terms = new HashSet<String>(); - int num = _TestUtil.nextInt(random, 0, 255); + int num = _TestUtil.nextInt(random(), 0, 255); for (int i = 0; i < num; i++) { sb.append(' '); - char term = (char) _TestUtil.nextInt(random, 'a', 'z'); + char term = (char) _TestUtil.nextInt(random(), 'a', 'z'); sb.append(term); terms.add("" + term); } diff --git a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java index f12f4c6324c..90646302c9b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java @@ -95,10 +95,10 @@ public class BaseTestRangeFilter extends LuceneTestCase { @BeforeClass public static void beforeClassBaseTestRangeFilter() throws Exception { maxId = atLeast(500); - signedIndexDir = new TestIndex(random, Integer.MAX_VALUE, Integer.MIN_VALUE, true); - unsignedIndexDir = new TestIndex(random, Integer.MAX_VALUE, 0, false); - signedIndexReader = build(random, signedIndexDir); - unsignedIndexReader = build(random, unsignedIndexDir); + signedIndexDir = new TestIndex(random(), Integer.MAX_VALUE, Integer.MIN_VALUE, true); + unsignedIndexDir = new TestIndex(random(), Integer.MAX_VALUE, 0, false); + signedIndexReader = build(random(), signedIndexDir); + unsignedIndexReader = build(random(), unsignedIndexDir); } @AfterClass diff --git
a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java index da5b24f06fe..36736c09bac 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java @@ -46,7 +46,7 @@ public class TestAutomatonQuery extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); Document doc = new Document(); Field titleField = newField("title", "some title", TextField.TYPE_UNSTORED); Field field = newField(FN, "this is document one 2345", TextField.TYPE_UNSTORED); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java index ad043b13456..d03fbaccf6e 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java @@ -46,7 +46,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); Document doc = new Document(); Field titleField = newField("title", "some title", TextField.TYPE_UNSTORED); Field field = newField(FN, "", TextField.TYPE_UNSTORED); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java index ff9c6bb7c3f..c5d8bf45e35 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java @@ -56,7 +56,7 @@ public class TestBoolean2 extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(newField(field, docFields[i], TextField.TYPE_UNSTORED)); @@ -67,23 +67,23 @@ public class TestBoolean2 extends LuceneTestCase { searcher = new IndexSearcher(littleReader); // Make big index - dir2 = new MockDirectoryWrapper(random, new RAMDirectory(directory, IOContext.DEFAULT)); + dir2 = new MockDirectoryWrapper(random(), new RAMDirectory(directory, IOContext.DEFAULT)); // First multiply small test index: mulFactor = 1; int docCount = 0; do { - final Directory copy = new MockDirectoryWrapper(random, new RAMDirectory(dir2, IOContext.DEFAULT)); - RandomIndexWriter w = new RandomIndexWriter(random, dir2); + final Directory copy = new MockDirectoryWrapper(random(), new RAMDirectory(dir2, IOContext.DEFAULT)); + RandomIndexWriter w = new RandomIndexWriter(random(), dir2); w.addIndexes(copy); docCount = w.maxDoc(); w.close(); mulFactor *= 2; } while(docCount < 3000); - RandomIndexWriter w = new RandomIndexWriter(random, dir2, - 
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + RandomIndexWriter w = new RandomIndexWriter(random(), dir2, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) + .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); doc.add(newField("field2", "xxx", TextField.TYPE_UNSTORED)); for(int i=0;i numTerms) { - terms.remove(random.nextInt(terms.size())); + terms.remove(random().nextInt(terms.size())); } if (VERBOSE) { @@ -256,13 +256,13 @@ public class TestBooleanQuery extends LuceneTestCase { final int nextUpto; final int nextDoc; final int left = hits.size() - upto; - if (left == 1 || random.nextBoolean()) { + if (left == 1 || random().nextBoolean()) { // next nextUpto = 1+upto; nextDoc = scorer.nextDoc(); } else { // advance - int inc = _TestUtil.nextInt(random, 1, left-1); + int inc = _TestUtil.nextInt(random(), 1, left-1); nextUpto = inc + upto; nextDoc = scorer.advance(hits.get(nextUpto).doc); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java index 16420e310ba..b37e015e876 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java @@ -42,7 +42,7 @@ public class TestBooleanScorer extends LuceneTestCase String[] values = new String[] { "1", "2", "3", "4" }; - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); for (int i = 0; i < values.length; i++) { Document doc = new Document(); doc.add(newField(FIELD, values[i], StringField.TYPE_STORED)); @@ -73,7 +73,7 @@ public class TestBooleanScorer extends LuceneTestCase // changes, we have a test to back it up. Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); writer.commit(); IndexReader ir = writer.getReader(); writer.close(); @@ -127,7 +127,7 @@ public class TestBooleanScorer extends LuceneTestCase public void testMoreThan32ProhibitedClauses() throws Exception { final Directory d = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, d); + final RandomIndexWriter w = new RandomIndexWriter(random(), d); Document doc = new Document(); doc.add(new TextField("field", "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33")); w.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java index 873a3dabe82..ed9218d9a85 100755 --- a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java @@ -153,7 +153,7 @@ public class TestCachingCollector extends LuceneTestCase { // caching would terminate even if a smaller length would suffice. // set RAM limit enough for 150 docs + random(10000) - int numDocs = random.nextInt(10000) + 150; + int numDocs = random().nextInt(10000) + 150; for (boolean cacheScores : new boolean[] { false, true }) { int bytesPerDoc = cacheScores ? 
8 : 4; CachingCollector cc = CachingCollector.create(new NoOpCollector(false), diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java index 8cdb5455c94..4949cd789d8 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java @@ -39,7 +39,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testCachingWorks() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.close(); IndexReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(dir)); @@ -65,7 +65,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testNullDocIdSet() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.close(); IndexReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(dir)); @@ -88,7 +88,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testNullDocIdSetIterator() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.close(); IndexReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(dir)); @@ -132,7 +132,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testIsCacheAble() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(new Document()); writer.close(); @@ -159,9 +159,9 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testEnforceDeletions() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter( - random, + random(), dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMergeScheduler(new SerialMergeScheduler()). 
// asserts below requires no unexpected merges: setMergePolicy(newLogMergePolicy(10)) diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java index bf7bbbc53d8..32d4f75692f 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java @@ -87,7 +87,7 @@ public class TestConstantScoreQuery extends LuceneTestCase { IndexSearcher searcher = null; try { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter (random, directory); + RandomIndexWriter writer = new RandomIndexWriter (random(), directory); Document doc = new Document(); doc.add(newField("field", "term", StringField.TYPE_UNSTORED)); @@ -132,7 +132,7 @@ public class TestConstantScoreQuery extends LuceneTestCase { public void testConstantScoreQueryAndFilter() throws Exception { Directory d = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, d); + RandomIndexWriter w = new RandomIndexWriter(random(), d); Document doc = new Document(); doc.add(newField("field", "a", StringField.TYPE_UNSTORED)); w.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java index 71ed0bbe958..3d58e7599ef 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java @@ -50,8 +50,8 @@ public class TestCustomSearcherSort extends LuceneTestCase { super.setUp(); INDEX_SIZE = atLeast(2000); index = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, index); - RandomGen random = new RandomGen(LuceneTestCase.random); + RandomIndexWriter writer = new RandomIndexWriter(random(), index); + RandomGen random = new RandomGen(random()); for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if to low the // problem doesn't show up Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java index 40a4bb4f9cf..2e4c6b89551 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java @@ -42,7 +42,7 @@ public class TestDateFilter extends LuceneTestCase { public void testBefore() throws IOException { // create an index Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); long now = System.currentTimeMillis(); @@ -107,7 +107,7 @@ public class TestDateFilter extends LuceneTestCase { public void testAfter() throws IOException { // create an index Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); long now = System.currentTimeMillis(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java index 3031ec1a2b3..d7bb05ef284 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java @@ -48,7 +48,7 @@ public class TestDateSort extends LuceneTestCase { 
super.setUp(); // Create an index writer. directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); // oldest doc: // Add the first document. text = "Document 1" dateTime = Oct 10 03:25:22 EDT 2007 diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java index b680bc6eb01..e592c2b3ff8 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java @@ -91,8 +91,8 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { super.setUp(); index = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, index, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(random(), index, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setSimilarity(sim).setMergePolicy(newLogMergePolicy())); // hed is the most important field, dek is secondary @@ -167,7 +167,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { dq.add(tq("id", "d1")); dq.add(tq("dek", "DOES_NOT_EXIST")); - QueryUtils.check(random, dq, s); + QueryUtils.check(random(), dq, s); assertTrue(s.getTopReaderContext() instanceof AtomicReaderContext); final Weight dw = s.createNormalizedWeight(dq); AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext(); @@ -184,7 +184,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { dq.add(tq("dek", "albino")); dq.add(tq("dek", "DOES_NOT_EXIST")); assertTrue(s.getTopReaderContext() instanceof AtomicReaderContext); - QueryUtils.check(random, dq, s); + QueryUtils.check(random(), dq, s); final Weight dw = s.createNormalizedWeight(dq); AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext(); final Scorer ds = dw.scorer(context, true, false, context.reader().getLiveDocs()); @@ -198,7 +198,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); q.add(tq("hed", "albino")); q.add(tq("hed", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -222,7 +222,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); q.add(tq("dek", "albino")); q.add(tq("dek", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -247,7 +247,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q.add(tq("hed", "elephant")); q.add(tq("dek", "albino")); q.add(tq("dek", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -270,7 +270,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f); q.add(tq("dek", "albino")); q.add(tq("dek", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -298,7 +298,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q1.add(tq("hed", "albino")); q1.add(tq("dek", "albino")); q.add(q1, BooleanClause.Occur.MUST);// true,false); - QueryUtils.check(random, q1, s); + 
QueryUtils.check(random(), q1, s); } { @@ -306,10 +306,10 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("hed", "elephant")); q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.MUST);// true,false); - QueryUtils.check(random, q2, s); + QueryUtils.check(random(), q2, s); } - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -341,7 +341,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.SHOULD);// false,false); } - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -377,7 +377,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.SHOULD);// false,false); } - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -431,7 +431,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.SHOULD);// false,false); } - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java index b1540405403..08d01f33478 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java @@ -36,7 +36,7 @@ public class TestDocBoost extends LuceneTestCase { public void testDocBoost() throws Exception { Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(random(), store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); Field f1 = newField("field", "word", TextField.TYPE_STORED); Field f2 = newField("field", "word", TextField.TYPE_STORED); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java b/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java index 43b1c0e46ff..ab82e29eea3 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java @@ -101,7 +101,7 @@ public class TestDocIdSet extends LuceneTestCase { // Tests that if a Filter produces a null DocIdSet, which is given to // IndexSearcher, everything works fine. This came up in LUCENE-1754. 
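The comment above deserves a gloss: a Filter is allowed to return null from getDocIdSet instead of allocating an empty DocIdSet, and the search machinery must read that null as "no documents match" rather than dereferencing it. A minimal sketch of such a filter against the 4.x-era API used in these hunks (the class name is illustrative, and the exact Filter signature varies between versions):

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;

// Matches nothing, and signals it with null rather than an empty set;
// IndexSearcher is expected to cope (see LUCENE-1754).
class NullDocIdSetFilter extends Filter {
  @Override
  public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    return null;
  }
}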
Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newField("c", "val", StringField.TYPE_UNSTORED)); writer.addDocument(doc); @@ -127,7 +127,7 @@ public class TestDocIdSet extends LuceneTestCase { public void testNullIteratorFilteredDocIdSet() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newField("c", "val", StringField.TYPE_UNSTORED)); writer.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java index c8fee4341a6..70d5dfff620 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java @@ -52,7 +52,7 @@ public class TestDocValuesScoring extends LuceneTestCase { "Lucene3x".equals(Codec.getDefault().getName())); Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir); Document doc = new Document(); Field field = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(field); @@ -98,8 +98,8 @@ public class TestDocValuesScoring extends LuceneTestCase { // in this case, we searched on field "foo". first document should have 2x the score. TermQuery tq = new TermQuery(new Term("foo", "quick")); - QueryUtils.check(random, tq, searcher1); - QueryUtils.check(random, tq, searcher2); + QueryUtils.check(random(), tq, searcher1); + QueryUtils.check(random(), tq, searcher2); TopDocs noboost = searcher1.search(tq, 10); TopDocs boost = searcher2.search(tq, 10); @@ -111,8 +111,8 @@ public class TestDocValuesScoring extends LuceneTestCase { // this query matches only the second document, which should have 4x the score. tq = new TermQuery(new Term("foo", "jumps")); - QueryUtils.check(random, tq, searcher1); - QueryUtils.check(random, tq, searcher2); + QueryUtils.check(random(), tq, searcher1); + QueryUtils.check(random(), tq, searcher2); noboost = searcher1.search(tq, 10); boost = searcher2.search(tq, 10); @@ -124,8 +124,8 @@ public class TestDocValuesScoring extends LuceneTestCase { // search on on field bar just for kicks, nothing should happen, since we setup // our sim provider to only use foo_boost for field foo. tq = new TermQuery(new Term("bar", "quick")); - QueryUtils.check(random, tq, searcher1); - QueryUtils.check(random, tq, searcher2); + QueryUtils.check(random(), tq, searcher1); + QueryUtils.check(random(), tq, searcher2); noboost = searcher1.search(tq, 10); boost = searcher2.search(tq, 10); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java index dfa5fc597f8..fa04a5b52c4 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java @@ -39,7 +39,7 @@ public class TestElevationComparator extends LuceneTestCase { Directory directory = newDirectory(); IndexWriter writer = new IndexWriter( directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). 
setMaxBufferedDocs(2). setMergePolicy(newLogMergePolicy(1000)). setSimilarity(new DefaultSimilarity()) diff --git a/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java b/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java index 568636c784f..16dbb0a9e15 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java @@ -71,7 +71,7 @@ public class TestExplanations extends LuceneTestCase { @BeforeClass public static void beforeClassTestExplanations() throws Exception { directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(newField(KEY, ""+i, StringField.TYPE_UNSTORED)); @@ -95,7 +95,7 @@ public class TestExplanations extends LuceneTestCase { /** check the expDocNrs first, then check the query (and the explanations) */ public void qtest(Query q, int[] expDocNrs) throws Exception { - CheckHits.checkHitCollector(random, q, FIELD, searcher, expDocNrs); + CheckHits.checkHitCollector(random(), q, FIELD, searcher, expDocNrs); } /** diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java index 3d31c26c41c..be4542ccbd3 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java @@ -53,7 +53,7 @@ public class TestFieldCache extends LuceneTestCase { NUM_DOCS = atLeast(500); NUM_ORDS = atLeast(2); directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); long theLong = Long.MAX_VALUE; double theDouble = Double.MAX_VALUE; byte theByte = Byte.MAX_VALUE; @@ -82,13 +82,13 @@ public class TestFieldCache extends LuceneTestCase { } // sometimes skip the field: - if (random.nextInt(40) != 17) { + if (random().nextInt(40) != 17) { unicodeStrings[i] = generateString(i); doc.add(newField("theRandomUnicodeString", unicodeStrings[i], StringField.TYPE_STORED)); } // sometimes skip the field: - if (random.nextInt(10) != 8) { + if (random().nextInt(10) != 8) { for (int j = 0; j < NUM_ORDS; j++) { String newValue = generateString(i); multiValued[i][j] = new BytesRef(newValue); @@ -128,54 +128,54 @@ public class TestFieldCache extends LuceneTestCase { public void test() throws IOException { FieldCache cache = FieldCache.DEFAULT; - double [] doubles = cache.getDoubles(reader, "theDouble", random.nextBoolean()); - assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, random.nextBoolean())); + double [] doubles = cache.getDoubles(reader, "theDouble", random().nextBoolean()); + 
assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble", random().nextBoolean())); + assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, random().nextBoolean())); assertTrue("doubles Size: " + doubles.length + " is not: " + NUM_DOCS, doubles.length == NUM_DOCS); for (int i = 0; i < doubles.length; i++) { assertTrue(doubles[i] + " does not equal: " + (Double.MAX_VALUE - i), doubles[i] == (Double.MAX_VALUE - i)); } - long [] longs = cache.getLongs(reader, "theLong", random.nextBoolean()); - assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.DEFAULT_LONG_PARSER, random.nextBoolean())); + long [] longs = cache.getLongs(reader, "theLong", random().nextBoolean()); + assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong", random().nextBoolean())); + assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.DEFAULT_LONG_PARSER, random().nextBoolean())); assertTrue("longs Size: " + longs.length + " is not: " + NUM_DOCS, longs.length == NUM_DOCS); for (int i = 0; i < longs.length; i++) { assertTrue(longs[i] + " does not equal: " + (Long.MAX_VALUE - i) + " i=" + i, longs[i] == (Long.MAX_VALUE - i)); } - byte [] bytes = cache.getBytes(reader, "theByte", random.nextBoolean()); - assertSame("Second request to cache return same array", bytes, cache.getBytes(reader, "theByte", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", bytes, cache.getBytes(reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER, random.nextBoolean())); + byte [] bytes = cache.getBytes(reader, "theByte", random().nextBoolean()); + assertSame("Second request to cache return same array", bytes, cache.getBytes(reader, "theByte", random().nextBoolean())); + assertSame("Second request with explicit parser return same array", bytes, cache.getBytes(reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER, random().nextBoolean())); assertTrue("bytes Size: " + bytes.length + " is not: " + NUM_DOCS, bytes.length == NUM_DOCS); for (int i = 0; i < bytes.length; i++) { assertTrue(bytes[i] + " does not equal: " + (Byte.MAX_VALUE - i), bytes[i] == (byte) (Byte.MAX_VALUE - i)); } - short [] shorts = cache.getShorts(reader, "theShort", random.nextBoolean()); - assertSame("Second request to cache return same array", shorts, cache.getShorts(reader, "theShort", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", shorts, cache.getShorts(reader, "theShort", FieldCache.DEFAULT_SHORT_PARSER, random.nextBoolean())); + short [] shorts = cache.getShorts(reader, "theShort", random().nextBoolean()); + assertSame("Second request to cache return same array", shorts, cache.getShorts(reader, "theShort", random().nextBoolean())); + assertSame("Second request with explicit parser return same array", shorts, cache.getShorts(reader, "theShort", FieldCache.DEFAULT_SHORT_PARSER, random().nextBoolean())); assertTrue("shorts Size: " + shorts.length + " is not: " + NUM_DOCS, shorts.length == NUM_DOCS); for (int i = 0; i < shorts.length; i++) { assertTrue(shorts[i] + " does not equal: " + (Short.MAX_VALUE - i), shorts[i] == (short) (Short.MAX_VALUE - i)); } - int [] ints = 
cache.getInts(reader, "theInt", random.nextBoolean()); - assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.DEFAULT_INT_PARSER, random.nextBoolean())); + int [] ints = cache.getInts(reader, "theInt", random().nextBoolean()); + assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt", random().nextBoolean())); + assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.DEFAULT_INT_PARSER, random().nextBoolean())); assertTrue("ints Size: " + ints.length + " is not: " + NUM_DOCS, ints.length == NUM_DOCS); for (int i = 0; i < ints.length; i++) { assertTrue(ints[i] + " does not equal: " + (Integer.MAX_VALUE - i), ints[i] == (Integer.MAX_VALUE - i)); } - float [] floats = cache.getFloats(reader, "theFloat", random.nextBoolean()); - assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.DEFAULT_FLOAT_PARSER, random.nextBoolean())); + float [] floats = cache.getFloats(reader, "theFloat", random().nextBoolean()); + assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat", random().nextBoolean())); + assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.DEFAULT_FLOAT_PARSER, random().nextBoolean())); assertTrue("floats Size: " + floats.length + " is not: " + NUM_DOCS, floats.length == NUM_DOCS); for (int i = 0; i < floats.length; i++) { assertTrue(floats[i] + " does not equal: " + (Float.MAX_VALUE - i), floats[i] == (Float.MAX_VALUE - i)); @@ -224,7 +224,7 @@ public class TestFieldCache extends LuceneTestCase { // seek the enum around (note this isn't a great test here) int num = atLeast(100); for (int i = 0; i < num; i++) { - int k = _TestUtil.nextInt(random, 1, nTerms-1); + int k = _TestUtil.nextInt(random(), 1, nTerms-1); BytesRef val1 = termsIndex.lookup(k, val); assertEquals(TermsEnum.SeekStatus.FOUND, tenum.seekCeil(val1)); assertEquals(val1, tenum.term()); @@ -292,7 +292,7 @@ public class TestFieldCache extends LuceneTestCase { public void testEmptyIndex() throws Exception { Directory dir = newDirectory(); - IndexWriter writer= new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(500)); + IndexWriter writer= new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(500)); writer.close(); IndexReader r = DirectoryReader.open(dir); AtomicReader reader = SlowCompositeReaderWrapper.wrap(r); @@ -305,16 +305,16 @@ public class TestFieldCache extends LuceneTestCase { private static String generateString(int i) { String s = null; - if (i > 0 && random.nextInt(3) == 1) { + if (i > 0 && random().nextInt(3) == 1) { // reuse past string -- try to find one that's not null for(int iter = 0; iter < 10 && s == null;iter++) { - s = unicodeStrings[random.nextInt(i)]; + s = unicodeStrings[random().nextInt(i)]; } if (s == null) { - s = _TestUtil.randomUnicodeString(random); + s = _TestUtil.randomUnicodeString(random()); } } else { - s = _TestUtil.randomUnicodeString(random); + s = _TestUtil.randomUnicodeString(random()); } 
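A second recurring shape in this patch: field initializers that consume randomness can no longer run at class-initialization time, because random() only has a context to hand back once the suite has started. Such initializers are therefore moved into @BeforeClass (the static ANALYZER in the TestThreadedForceMerge hunk earlier) or into setUp() (the analyzer field in the TestMatchAllDocsQuery hunk below). Condensed from the TestThreadedForceMerge hunk, the before/after looks like this:

// Before: evaluated while the class loads, outside any test context.
// private static final Analyzer ANALYZER =
//     new MockAnalyzer(random, MockTokenizer.SIMPLE, true);

// After: deferred until the suite's randomness context exists.
private static Analyzer ANALYZER;

@BeforeClass
public static void setup() {
  ANALYZER = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
}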
return s; } @@ -348,7 +348,7 @@ public class TestFieldCache extends LuceneTestCase { } } - int[] numInts = cache.getInts(reader, "numInt", random.nextBoolean()); + int[] numInts = cache.getInts(reader, "numInt", random().nextBoolean()); docsWithField = cache.getDocsWithField(reader, "numInt"); for (int i = 0; i < docsWithField.length(); i++) { if (i%2 == 0) { @@ -384,7 +384,7 @@ public class TestFieldCache extends LuceneTestCase { try { while(!failed.get()) { - final int op = random.nextInt(3); + final int op = random().nextInt(3); if (op == 0) { // Purge all caches & resume, once all // threads get here: diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java index 1770d2d43e6..a71b969b9e4 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java @@ -524,7 +524,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { @Test public void testSparseIndex() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for (int d = -20; d <= 20; d++) { Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java index 672b16a6511..be35f6ec9f2 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java @@ -37,7 +37,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { public void testMissingTerms() throws Exception { String fieldName = "field1"; Directory rd = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, rd); + RandomIndexWriter w = new RandomIndexWriter(random(), rd); for (int i = 0; i < 100; i++) { Document doc = new Document(); int term = i * 10; //terms are units of 10; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java index 6dc8425691d..3175b4f2ad9 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java @@ -35,8 +35,8 @@ public class TestFieldValueFilter extends LuceneTestCase { public void testFieldValueFilterNoValue() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); int docs = atLeast(10); int[] docStates = buildIndex(writer, docs); int numDocsNoValue = 0; @@ -63,8 +63,8 @@ public class TestFieldValueFilter extends LuceneTestCase { public void testFieldValueFilter() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, 
+ newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); int docs = atLeast(10); int[] docStates = buildIndex(writer, docs); int numDocsWithValue = 0; @@ -93,7 +93,7 @@ public class TestFieldValueFilter extends LuceneTestCase { int[] docStates = new int[docs]; for (int i = 0; i < docs; i++) { Document doc = new Document(); - if (random.nextBoolean()) { + if (random().nextBoolean()) { docStates[i] = 1; doc.add(newField("some", "value", TextField.TYPE_STORED)); } @@ -102,9 +102,9 @@ public class TestFieldValueFilter extends LuceneTestCase { writer.addDocument(doc); } writer.commit(); - int numDeletes = random.nextInt(docs); + int numDeletes = random().nextInt(docs); for (int i = 0; i < numDeletes; i++) { - int docID = random.nextInt(docs); + int docID = random().nextInt(docs); writer.deleteDocuments(new Term("id", "" + docID)); docStates[docID] = 2; } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java index 2d6be6d7ac7..9e06f154abd 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java @@ -52,7 +52,7 @@ public class TestFilteredQuery extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter (random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter (random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); doc.add (newField("field", "one two three four five", TextField.TYPE_STORED)); @@ -121,7 +121,7 @@ public class TestFilteredQuery extends LuceneTestCase { ScoreDoc[] hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (1, hits.length); assertEquals (1, hits[0].doc); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(random(), filteredquery,searcher); hits = searcher.search (filteredquery, null, 1000, new Sort(new SortField("sorter", SortField.Type.STRING))).scoreDocs; assertEquals (1, hits.length); @@ -130,23 +130,23 @@ public class TestFilteredQuery extends LuceneTestCase { filteredquery = new FilteredQueryRA(new TermQuery (new Term ("field", "one")), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (2, hits.length); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(random(), filteredquery,searcher); filteredquery = new FilteredQueryRA(new MatchAllDocsQuery(), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (2, hits.length); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(random(), filteredquery,searcher); filteredquery = new FilteredQueryRA(new TermQuery (new Term ("field", "x")), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (1, hits.length); assertEquals (3, hits[0].doc); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(random(), filteredquery,searcher); filteredquery = new FilteredQueryRA(new TermQuery (new Term ("field", "y")), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (0, hits.length); - QueryUtils.check(random, 
filteredquery,searcher); + QueryUtils.check(random(), filteredquery,searcher); // test boost Filter f = newStaticFilterA(); @@ -213,7 +213,7 @@ public class TestFilteredQuery extends LuceneTestCase { Query filteredquery = new FilteredQueryRA(rq, filter, useRandomAccess); ScoreDoc[] hits = searcher.search(filteredquery, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(random(), filteredquery,searcher); } public void testBooleanMUST() throws Exception { @@ -231,7 +231,7 @@ public class TestFilteredQuery extends LuceneTestCase { bq.add(query, BooleanClause.Occur.MUST); ScoreDoc[] hits = searcher.search(bq, null, 1000).scoreDocs; assertEquals(0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } public void testBooleanSHOULD() throws Exception { @@ -249,7 +249,7 @@ public class TestFilteredQuery extends LuceneTestCase { bq.add(query, BooleanClause.Occur.SHOULD); ScoreDoc[] hits = searcher.search(bq, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } // Make sure BooleanQuery, which does out-of-order @@ -268,7 +268,7 @@ public class TestFilteredQuery extends LuceneTestCase { bq.add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.SHOULD); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query, searcher); + QueryUtils.check(random(), query, searcher); } public void testChainedFilters() throws Exception { @@ -284,14 +284,14 @@ public class TestFilteredQuery extends LuceneTestCase { new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "four")))), useRandomAccess); ScoreDoc[] hits = searcher.search(query, 10).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, query, searcher); + QueryUtils.check(random(), query, searcher); // one more: query = new TestFilteredQuery.FilteredQueryRA(query, new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "five")))), useRandomAccess); hits = searcher.search(query, 10).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query, searcher); + QueryUtils.check(random(), query, searcher); } public void testEqualsHashcode() throws Exception { @@ -337,11 +337,11 @@ public class TestFilteredQuery extends LuceneTestCase { private void assertRewrite(FilteredQuery fq, Class<? extends Query> clazz) throws Exception { // assign crazy boost to FQ - final float boost = random.nextFloat() * 100.f; + final float boost = random().nextFloat() * 100.f; fq.setBoost(boost); // assign crazy boost to inner - final float innerBoost = random.nextFloat() * 100.f; + final float innerBoost = random().nextFloat() * 100.f; fq.getQuery().setBoost(innerBoost); // check the class and boosts of rewritten query diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java b/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java index 7679453f19b..7cfdd3eba71 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java @@ -48,14 +48,14 @@ public class TestFilteredSearch extends LuceneTestCase { Directory directory = newDirectory(); int[] filterBits = {1, 36}; SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); searchFiltered(writer, directory, filter, enforceSingleSegment); // run the test on more than one segment enforceSingleSegment = false; // reset - it is stateful filter.reset(); writer.close(); - writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy())); + writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy())); // we index 60 docs - this will create 6 segments searchFiltered(writer, directory, filter, enforceSingleSegment); writer.close(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java index 62dbf88eca8..701aa1b7c8f 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java @@ -39,7 +39,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testFuzziness() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("aaaaa", writer); addDoc("aaaab", writer); addDoc("aaabb", writer); @@ -191,7 +191,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testFuzzinessLong() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("aaaaaaa", writer); addDoc("segment", writer); @@ -287,14 +287,14 @@ public class TestFuzzyQuery extends LuceneTestCase { */ public void testTieBreaker() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("a123456", writer); addDoc("c123456", writer); addDoc("d123456", writer); addDoc("e123456", writer); Directory directory2 = newDirectory(); - RandomIndexWriter writer2 = new RandomIndexWriter(random, directory2); + RandomIndexWriter writer2 = new RandomIndexWriter(random(), directory2); addDoc("a123456", writer2); addDoc("b123456", writer2); addDoc("b123456", writer2); @@ -321,7 +321,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testTokenLengthOpt() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("12345678911", writer); addDoc("segment", writer); @@ -357,7 +357,7 @@ public class TestFuzzyQuery extends LuceneTestCase { /** Test the TopTermsBoostOnlyBooleanQueryRewrite rewrite method. 
*/ public void testBoostOnlyRewrite() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("Lucene", writer); addDoc("Lucene", writer); addDoc("Lucenne", writer); @@ -380,9 +380,9 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testGiga() throws Exception { - MockAnalyzer analyzer = new MockAnalyzer(random); + MockAnalyzer analyzer = new MockAnalyzer(random()); Directory index = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, index); + RandomIndexWriter w = new RandomIndexWriter(random(), index); addDoc("Lucene in Action", w); addDoc("Lucene for Dummies", w); @@ -417,7 +417,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testDistanceAsEditsSearching() throws Exception { Directory index = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, index); + RandomIndexWriter w = new RandomIndexWriter(random(), index); addDoc("foobar", w); addDoc("test", w); addDoc("working", w); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java index dc55dc83770..cda3d482bf5 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java @@ -73,7 +73,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase { }; public void testFromTestData() throws Exception { // TODO: randomize! - assertFromTestData(mappings[random.nextInt(mappings.length)]); + assertFromTestData(mappings[random().nextInt(mappings.length)]); } public void assertFromTestData(int codePointTable[]) throws Exception { @@ -87,7 +87,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase { int terms = (int) Math.pow(2, bits); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); Field field = newField("field", "", TextField.TYPE_UNSTORED); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java index 11050ad28e6..c3eb15042ea 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java @@ -35,8 +35,14 @@ import org.apache.lucene.util.LuceneTestCase; * */ public class TestMatchAllDocsQuery extends LuceneTestCase { - private Analyzer analyzer = new MockAnalyzer(random); + private Analyzer analyzer; + @Override + public void setUp() throws Exception { + super.setUp(); + analyzer = new MockAnalyzer(random()); + } + public void testQuery() throws Exception { Directory dir = newDirectory(); IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java index 91a9d434ad3..70dffd49402 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java +++ 
b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java @@ -49,7 +49,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testPhrasePrefix() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("blueberry pie", writer); add("blueberry strudel", writer); add("blueberry pizza", writer); @@ -141,7 +141,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { // LUCENE-2580 public void testTall() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("blueberry chocolate pie", writer); add("blueberry chocolate tart", writer); IndexReader r = writer.getReader(); @@ -160,7 +160,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { @Ignore //LUCENE-3821 fixes sloppy phrase scoring, except for this known problem public void testMultiSloppyWithRepeats() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("a b c d e f g h i k", writer); IndexReader r = writer.getReader(); writer.close(); @@ -180,7 +180,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testMultiExactWithRepeats() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("a b c d e f g h i k", writer); IndexReader r = writer.getReader(); writer.close(); @@ -207,7 +207,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { // and all terms required. // The contained PhraseMultiQuery must contain exactly one term array. 
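
Every hunk in this patch applies one mechanical change: reads of the shared `random` field are replaced with calls to the `random()` accessor, which hands back the Random bound to the currently running test. The sketch below is illustrative only, not the actual LuceneTestCase implementation, and the seeding scheme shown is an assumption; it exists to show why an accessor beats a shared static field once suites run on several JVM threads: each thread draws from its own source, so a failure can be replayed from its reported seed without interference from sibling tests.

import java.util.Random;

public abstract class PerThreadRandomSketch {

  // Hypothetical master seed; a real runner derives it from the command
  // line, or picks one at random and prints it when a test fails.
  private static final long MASTER_SEED = 0x5EEDL;

  // One Random instance per thread. Parallel suites therefore never
  // interleave their draws, which is what made a shared field unreproducible.
  private static final ThreadLocal<Random> CONTEXT =
      ThreadLocal.withInitial(() -> new Random(MASTER_SEED));

  // Drop-in replacement for the old shared 'random' field.
  protected static Random random() {
    return CONTEXT.get();
  }
}

(The real framework also re-seeds per test method; the point here is only the accessor shape that the hunks in this patch migrate to.)
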
Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("blueberry pie", writer); add("blueberry chewing gum", writer); add("blue raspberry pie", writer); @@ -238,7 +238,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testPhrasePrefixWithBooleanQuery() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("This is a test", "object", writer); add("a note", "note", writer); @@ -265,7 +265,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testNoDocs() throws Exception { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("a note", "note", writer); IndexReader reader = writer.getReader(); @@ -328,7 +328,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testCustomIDF() throws Exception { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("This is a test", "object", writer); add("a note", "note", writer); @@ -365,7 +365,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { tokens[2].append("c"); tokens[2].setPositionIncrement(0); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(new TextField("field", new CannedTokenStream(tokens))); writer.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java index 7dd81d5f1bf..a4bb9755a9d 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java @@ -56,9 +56,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { "X 4 5 6" }; small = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, small, + RandomIndexWriter writer = new RandomIndexWriter(random(), small, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy())); FieldType customType = new FieldType(TextField.TYPE_STORED); customType.setTokenized(false); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java index 28e7ec8f5b7..533fc7cdfbf 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java @@ -46,9 +46,9 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase { dir = newDirectory(); sdir1 = newDirectory(); sdir2 = newDirectory(); - final RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(random)); - final RandomIndexWriter swriter1 = new RandomIndexWriter(random, sdir1, new MockAnalyzer(random)); - final 
RandomIndexWriter swriter2 = new RandomIndexWriter(random, sdir2, new MockAnalyzer(random)); + final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random())); + final RandomIndexWriter swriter1 = new RandomIndexWriter(random(), sdir1, new MockAnalyzer(random())); + final RandomIndexWriter swriter2 = new RandomIndexWriter(random(), sdir2, new MockAnalyzer(random())); for (int i = 0; i < 10; i++) { Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java index 61f308c889d..2d96504047d 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java @@ -40,7 +40,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); //writer.setUseCompoundFile(false); //writer.infoStream = System.out; FieldType customType = new FieldType(TextField.TYPE_STORED); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java index ae886c147a0..8dea2b9d285 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java @@ -40,17 +40,17 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase { */ public void testMultiValuedNRQ() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) + .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US)); int num = atLeast(500); for (int l = 0; l < num; l++) { Document doc = new Document(); - for (int m=0, c=random.nextInt(10); m<=c; m++) { - int value = random.nextInt(Integer.MAX_VALUE); + for (int m=0, c=random().nextInt(10); m<=c; m++) { + int value = random().nextInt(Integer.MAX_VALUE); doc.add(newField("asc", format.format(value), StringField.TYPE_UNSTORED)); doc.add(new IntField("trie", value)); } @@ -62,8 +62,8 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase { IndexSearcher searcher=newSearcher(reader); num = atLeast(50); for (int i = 0; i < num; i++) { - int lower=random.nextInt(Integer.MAX_VALUE); - int upper=random.nextInt(Integer.MAX_VALUE); + int lower=random().nextInt(Integer.MAX_VALUE); + int upper=random().nextInt(Integer.MAX_VALUE); if (lower>upper) { int a=lower; lower=upper; upper=a; } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java index 
7a28963e9bf..11a22b37e56 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java @@ -33,7 +33,7 @@ public class TestNGramPhraseQuery extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); writer.close(); reader = IndexReader.open(directory); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java b/lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java index ed509df206e..8b89b32f0cd 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java @@ -65,7 +65,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { @Override protected Directory getDirectory(Directory in) { // Randomly swap in NRTCachingDir - if (random.nextBoolean()) { + if (random().nextBoolean()) { if (VERBOSE) { System.out.println("TEST: wrap NRTCachingDir"); } @@ -81,7 +81,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { final long gen = genWriter.updateDocuments(id, docs); // Randomly verify the update "took": - if (random.nextInt(20) == 2) { + if (random().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -104,7 +104,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { protected void addDocuments(Term id, List> docs) throws Exception { final long gen = genWriter.addDocuments(docs); // Randomly verify the add "took": - if (random.nextInt(20) == 2) { + if (random().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -127,7 +127,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { final long gen = genWriter.addDocument(doc); // Randomly verify the add "took": - if (random.nextInt(20) == 2) { + if (random().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -149,7 +149,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { protected void updateDocument(Term id, Iterable doc) throws Exception { final long gen = genWriter.updateDocument(id, doc); // Randomly verify the udpate "took": - if (random.nextInt(20) == 2) { + if (random().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -171,7 +171,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { protected void deleteDocuments(Term id) throws Exception { final long gen = genWriter.deleteDocuments(id); // randomly verify the delete "took": - if (random.nextInt(20) == 7) { + if (random().nextInt(20) == 7) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify del " + id); } @@ -202,8 +202,8 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { @Override protected void doAfterWriter(final ExecutorService es) throws Exception { - final double minReopenSec = 0.01 + 0.05 * random.nextDouble(); - final double maxReopenSec = minReopenSec * (1.0 + 10 * random.nextDouble()); + final double minReopenSec = 0.01 + 0.05 * random().nextDouble(); + final double maxReopenSec = minReopenSec 
* (1.0 + 10 * random().nextDouble()); if (VERBOSE) { System.out.println("TEST: make NRTManager maxReopenSec=" + maxReopenSec + " minReopenSec=" + minReopenSec); @@ -261,7 +261,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { // Test doesn't assert deletions until the end, so we // can randomize whether dels must be applied final NRTManager nrt; - if (random.nextBoolean()) { + if (random().nextBoolean()) { nrt = nrtDeletes; } else { nrt = nrtNoDeletes; @@ -295,7 +295,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { * LUCENE-3528 - NRTManager hangs in certain situations */ public void testThreadStarvationNoDeleteNRTReader() throws IOException, InterruptedException { - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); Directory d = newDirectory(); final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch signal = new CountDownLatch(1); @@ -391,7 +391,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { public void testEvilSearcherFactory() throws Exception { final Directory dir = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, dir); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir); w.commit(); final IndexReader other = DirectoryReader.open(dir); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNot.java b/lucene/core/src/test/org/apache/lucene/search/TestNot.java index c424aade49c..5b909dc392c 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNot.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNot.java @@ -34,7 +34,7 @@ public class TestNot extends LuceneTestCase { public void testNot() throws Exception { Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store); + RandomIndexWriter writer = new RandomIndexWriter(random(), store); Document d1 = new Document(); d1.add(newField("field", "a b", TextField.TYPE_STORED)); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java index aa61e8360d1..cdb966a297d 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java @@ -56,9 +56,9 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { noDocs = atLeast(4096); distance = (1 << 30) / noDocs; directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000)) + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) + .setMaxBufferedDocs(_TestUtil.nextInt(random(), 100, 1000)) .setMergePolicy(newLogMergePolicy())); final FieldType storedInt = new FieldType(IntField.TYPE); @@ -299,8 +299,8 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { @Test public void testInfiniteValues() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + newIndexWriterConfig( 
TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); doc.add(new FloatField("float", Float.NEGATIVE_INFINITY)); doc.add(new IntField("int", Integer.MIN_VALUE)); @@ -370,10 +370,10 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception { String field="field"+precisionStep; int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC; - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { - int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset; - int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset; + int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset; + int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { int a=lower; lower=upper; upper=a; } @@ -493,10 +493,10 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { private void testRangeSplit(int precisionStep) throws Exception { String field="ascfield"+precisionStep; // 10 random tests - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(random(), 10, 20); for (int i =0; i< num; i++) { - int lower=(int)(random.nextDouble()*noDocs - noDocs/2); - int upper=(int)(random.nextDouble()*noDocs - noDocs/2); + int lower=(int)(random().nextDouble()*noDocs - noDocs/2); + int upper=(int)(random().nextDouble()*noDocs - noDocs/2); if (lower>upper) { int a=lower; lower=upper; upper=a; } @@ -569,10 +569,10 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { String field="field"+precisionStep; // 10 random tests, the index order is ascending, // so using a reverse sort field should retun descending documents - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { - int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset; - int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset; + int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset; + int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { int a=lower; lower=upper; upper=a; } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java index d4019a694c5..f228e516044 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java @@ -56,9 +56,9 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { noDocs = atLeast(4096); distance = (1L << 60) / noDocs; directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000)) + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) + .setMaxBufferedDocs(_TestUtil.nextInt(random(), 100, 1000)) .setMergePolicy(newLogMergePolicy())); final FieldType storedLong = new FieldType(LongField.TYPE); @@ -324,8 +324,8 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { @Test public void testInfiniteValues() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - 
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); doc.add(new DoubleField("double", Double.NEGATIVE_INFINITY)); doc.add(new LongField("long", Long.MIN_VALUE)); @@ -395,10 +395,10 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception { String field="field"+precisionStep; int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC; - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { - long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset; - long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset; + long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset; + long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { long a=lower; lower=upper; upper=a; } @@ -523,10 +523,10 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { private void testRangeSplit(int precisionStep) throws Exception { String field="ascfield"+precisionStep; // 10 random tests - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { - long lower=(long)(random.nextDouble()*noDocs - noDocs/2); - long upper=(long)(random.nextDouble()*noDocs - noDocs/2); + long lower=(long)(random().nextDouble()*noDocs - noDocs/2); + long upper=(long)(random().nextDouble()*noDocs - noDocs/2); if (lower>upper) { long a=lower; lower=upper; upper=a; } @@ -609,10 +609,10 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { String field="field"+precisionStep; // 10 random tests, the index order is ascending, // so using a reverse sort field should retun descending documents - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { - long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset; - long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset; + long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset; + long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { long a=lower; lower=upper; upper=a; } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java index d88c96b0b14..551f15bb2f6 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java @@ -41,7 +41,7 @@ public class TestPhrasePrefixQuery extends LuceneTestCase { */ public void testPhrasePrefix() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); Document doc1 = new Document(); Document doc2 = new Document(); Document doc3 = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java index 75d1e79bf8f..3b51b5d3d1a 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -17,31 +17,34 
@@ package org.apache.lucene.search; * limitations under the License. */ -import org.apache.lucene.util.LuceneTestCase; +import java.io.*; +import java.util.*; + import org.apache.lucene.analysis.*; -import org.apache.lucene.analysis.tokenattributes.*; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.document.*; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.similarities.DefaultSimilarity; -import org.apache.lucene.store.*; -import org.apache.lucene.util.Version; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.*; import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; -import java.io.Reader; -import java.io.StringReader; -import java.util.List; -import java.util.ArrayList; -import java.util.Random; +import com.carrotsearch.randomizedtesting.annotations.Seed; /** * Tests {@link PhraseQuery}. * * @see TestPositionIncrement + */ +/* + * Remove ThreadLeaks and run with (Eclipse or command line): + * -ea -Drt.seed=AFD1E7E84B35D2B1 + * to get leaked thread errors. */ +// @ThreadLeaks(linger = 1000, leakedThreadsBelongToSuite = true) +@Seed("AFD1E7E84B35D2B1") public class TestPhraseQuery extends LuceneTestCase { /** threshold for comparing floats */ @@ -66,7 +69,7 @@ public class TestPhraseQuery extends LuceneTestCase { return 100; } }; - RandomIndexWriter writer = new RandomIndexWriter(random, directory, analyzer); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, analyzer); Document doc = new Document(); doc.add(newField("field", "one two three four five", TextField.TYPE_STORED)); @@ -111,7 +114,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } public void testBarelyCloseEnough() throws Exception { @@ -120,7 +123,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } /** @@ -132,7 +135,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("exact match", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); query = new PhraseQuery(); @@ -140,7 +143,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("reverse not exact", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } public void testSlop1() throws Exception { @@ -150,7 +153,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "two")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("in order", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); // Ensures slop of 1 does not work for phrases out of order; @@ -161,7 +164,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new 
Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("reversed, slop not 2 or more", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } /** @@ -173,7 +176,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("just sloppy enough", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); query = new PhraseQuery(); @@ -182,7 +185,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("not sloppy enough", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } @@ -197,7 +200,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("two total moves", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); query = new PhraseQuery(); @@ -207,20 +210,20 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 5 not close enough", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); query.setSlop(6); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 6 just right", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } public void testPhraseQueryWithStopAnalyzer() throws Exception { Directory directory = newDirectory(); - Analyzer stopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, + Analyzer stopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig( Version.LUCENE_40, stopAnalyzer)); Document doc = new Document(); doc.add(newField("field", "the stop words are here", TextField.TYPE_STORED)); @@ -236,7 +239,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field","words")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); // StopAnalyzer as of 2.4 does not leave "holes", so this matches. 
@@ -245,7 +248,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "here")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); reader.close(); @@ -254,7 +257,7 @@ public class TestPhraseQuery extends LuceneTestCase { public void testPhraseQueryInConjunctionScorer() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); Document doc = new Document(); doc.add(newField("source", "marketing info", TextField.TYPE_STORED)); @@ -275,7 +278,7 @@ public class TestPhraseQuery extends LuceneTestCase { phraseQuery.add(new Term("source", "info")); ScoreDoc[] hits = searcher.search(phraseQuery, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, phraseQuery,searcher); + QueryUtils.check(random(), phraseQuery,searcher); TermQuery termQuery = new TermQuery(new Term("contents","foobar")); @@ -284,13 +287,13 @@ public class TestPhraseQuery extends LuceneTestCase { booleanQuery.add(phraseQuery, BooleanClause.Occur.MUST); hits = searcher.search(booleanQuery, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, termQuery,searcher); + QueryUtils.check(random(), termQuery,searcher); reader.close(); - writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); doc = new Document(); doc.add(newField("contents", "map entry woo", TextField.TYPE_STORED)); writer.addDocument(doc); @@ -330,7 +333,7 @@ public class TestPhraseQuery extends LuceneTestCase { booleanQuery.add(termQuery, BooleanClause.Occur.MUST); hits = searcher.search(booleanQuery, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, booleanQuery,searcher); + QueryUtils.check(random(), booleanQuery,searcher); reader.close(); @@ -339,8 +342,8 @@ public class TestPhraseQuery extends LuceneTestCase { public void testSlopScoring() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMergePolicy(newLogMergePolicy()) .setSimilarity(new DefaultSimilarity())); @@ -375,7 +378,7 @@ public class TestPhraseQuery extends LuceneTestCase { assertEquals(1, hits[1].doc); assertEquals(0.31, hits[2].score, 0.01); assertEquals(2, hits[2].doc); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); reader.close(); directory.close(); } @@ -399,13 +402,13 @@ public class TestPhraseQuery extends LuceneTestCase { ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 100 just right", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); query.setSlop(99); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 99 not enough", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), 
query,searcher); } // work on two docs like this: "phrase exist notexist exist found" @@ -418,7 +421,7 @@ public class TestPhraseQuery extends LuceneTestCase { ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("phrase without repetitions exists in 2 docs", 2, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); // phrase with repetitions that exists in 2 docs query = new PhraseQuery(); @@ -429,7 +432,7 @@ public class TestPhraseQuery extends LuceneTestCase { hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("phrase with repetitions exists in two docs", 2, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); // phrase I with repetitions that does not exist in any doc query = new PhraseQuery(); @@ -440,7 +443,7 @@ public class TestPhraseQuery extends LuceneTestCase { hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("nonexisting phrase with repetitions does not exist in any doc", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); // phrase II with repetitions that does not exist in any doc query = new PhraseQuery(); @@ -452,7 +455,7 @@ public class TestPhraseQuery extends LuceneTestCase { hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("nonexisting phrase with repetitions does not exist in any doc", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); } @@ -475,7 +478,7 @@ public class TestPhraseQuery extends LuceneTestCase { assertEquals("phrase found with exact phrase scorer", 1, hits.length); float score0 = hits[0].score; //System.out.println("(exact) field: two three: "+score0); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); // search on non palyndrome, find phrase with slop 2, though no slop required here. query.setSlop(2); // to use sloppy scorer @@ -484,7 +487,7 @@ public class TestPhraseQuery extends LuceneTestCase { float score1 = hits[0].score; //System.out.println("(sloppy) field: two three: "+score1); assertEquals("exact scorer and sloppy scorer score the same when slop does not matter",score0, score1, SCORE_COMP_THRESH); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); // search ordered in palyndrome, find it twice query = new PhraseQuery(); @@ -495,7 +498,7 @@ public class TestPhraseQuery extends LuceneTestCase { assertEquals("just sloppy enough", 1, hits.length); //float score2 = hits[0].score; //System.out.println("palindrome: two three: "+score2); - QueryUtils.check(random, query,searcher); + QueryUtils.check(random(), query,searcher); //commented out for sloppy-phrase efficiency (issue 736) - see SloppyPhraseScorer.phraseFreq(). 
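
A small idiom repeats throughout the TestNumericRangeQuery32/64 and TestMultiValuedNumericRangeQuery hunks above: two endpoints are drawn independently from the same distribution, then swapped so that lower <= upper holds before the range query is built, which keeps the query valid without biasing either endpoint. Extracted here as a self-contained sketch; the helper class and method names are illustrative, not Lucene's:

import java.util.Random;

final class RandomRangeSketch {

  // Draws two endpoints in [startOffset, startOffset + noDocs*distance)
  // and orders them, mirroring the swap in the hunks above.
  static int[] randomRange(Random r, int noDocs, int distance, int startOffset) {
    int lower = (int) (r.nextDouble() * noDocs * distance) + startOffset;
    int upper = (int) (r.nextDouble() * noDocs * distance) + startOffset;
    if (lower > upper) {
      int a = lower; lower = upper; upper = a;
    }
    return new int[] { lower, upper };
  }

  public static void main(String[] args) {
    // Same shape of arguments as the 32-bit test: ~4096 docs spread over 2^30.
    int[] range = randomRange(new Random(), 4096, (1 << 30) / 4096, 0);
    System.out.println("lower=" + range[0] + " upper=" + range[1]);
  }
}
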
//assertTrue("ordered scores higher in palindrome",score1+SCORE_COMP_THRESH> docs = new ArrayList>(); Document d = new Document(); Field f = newField("f", "", TextField.TYPE_UNSTORED); d.add(f); - Random r = random; + Random r = random(); int NUM_DOCS = atLeast(10); for (int i = 0; i < NUM_DOCS; i++) { // must be > 4096 so it spans multiple chunks - int termCount = _TestUtil.nextInt(random, 4097, 8200); + int termCount = _TestUtil.nextInt(random(), 4097, 8200); List doc = new ArrayList(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java index 85fa9c50e9d..9196e9ce1a8 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java @@ -89,7 +89,7 @@ public class TestPositionIncrement extends LuceneTestCase { } }; Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store, analyzer); + RandomIndexWriter writer = new RandomIndexWriter(random(), store, analyzer); Document d = new Document(); d.add(newField("field", "bogus", TextField.TYPE_STORED)); writer.addDocument(d); @@ -204,7 +204,7 @@ public class TestPositionIncrement extends LuceneTestCase { public void testPayloadsPos0() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockPayloadAnalyzer()); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockPayloadAnalyzer()); Document doc = new Document(); doc.add(new TextField("content", new StringReader( "a a b c d e a f g h i j a b k k"))); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java index 06e1c8bdba7..a19f4bcb491 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java @@ -70,7 +70,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase { } Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); writer.commit(); IndexReader ir = writer.getReader(); writer.close(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java index 492a647e600..1064d246c02 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java @@ -37,7 +37,7 @@ public class TestPrefixFilter extends LuceneTestCase { "/Computers/Mac/One", "/Computers/Mac/Two", "/Computers/Windows"}; - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); for (int i = 0; i < categories.length; i++) { Document doc = new Document(); doc.add(newField("category", categories[i], StringField.TYPE_STORED)); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java index c5cb7efef41..f24a88eafe4 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java +++ 
b/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java @@ -48,7 +48,7 @@ public class TestPrefixInBooleanQuery extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); Document doc = new Document(); Field field = newField(FIELD, "meaninglessnames", StringField.TYPE_UNSTORED); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java index 4a84dd3b4f7..f149cd9eb19 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java @@ -38,7 +38,7 @@ public class TestPrefixQuery extends LuceneTestCase { String[] categories = new String[] {"/Computers", "/Computers/Mac", "/Computers/Windows"}; - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); for (int i = 0; i < categories.length; i++) { Document doc = new Document(); doc.add(newField("category", categories[i], StringField.TYPE_STORED)); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java index 6e5d860ee43..2699293fa41 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java @@ -51,9 +51,9 @@ public class TestPrefixRandom extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) + .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); Field field = newField("field", "", StringField.TYPE_UNSTORED); @@ -64,7 +64,7 @@ public class TestPrefixRandom extends LuceneTestCase { final String codec = Codec.getDefault().getName(); int num = codec.equals("Lucene3x") ? 
200 * RANDOM_MULTIPLIER : atLeast(1000); for (int i = 0; i < num; i++) { - field.setStringValue(_TestUtil.randomUnicodeString(random, 10)); + field.setStringValue(_TestUtil.randomUnicodeString(random(), 10)); writer.addDocument(doc); } reader = writer.getReader(); @@ -118,7 +118,7 @@ public class TestPrefixRandom extends LuceneTestCase { public void testPrefixes() throws Exception { int num = atLeast(100); for (int i = 0; i < num; i++) - assertSame(_TestUtil.randomUnicodeString(random, 5)); + assertSame(_TestUtil.randomUnicodeString(random(), 5)); } /** check that the # of hits is the same as from a very diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java index 47410667150..38fb547e128 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java @@ -35,7 +35,7 @@ public class TestQueryWrapperFilter extends LuceneTestCase { public void testBasic() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newField("field", "value", TextField.TYPE_UNSTORED)); writer.addDocument(doc); @@ -87,14 +87,14 @@ public class TestQueryWrapperFilter extends LuceneTestCase { public void testRandom() throws Exception { final Directory d = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, d); + final RandomIndexWriter w = new RandomIndexWriter(random(), d); w.w.getConfig().setMaxBufferedDocs(17); final int numDocs = atLeast(100); final Set aDocs = new HashSet(); for(int i=0;i terms = new ArrayList(); int num = atLeast(200); for (int i = 0; i < num; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = _TestUtil.randomUnicodeString(random()); field.setStringValue(s); terms.add(s); writer.addDocument(doc); @@ -143,7 +143,7 @@ public class TestRegexpRandom2 extends LuceneTestCase { // but for preflex codec, the test can be very slow, so use less iterations. int num = Codec.getDefault().getName().equals("Lucene3x") ? 
100 * RANDOM_MULTIPLIER : atLeast(1000); for (int i = 0; i < num; i++) { - String reg = AutomatonTestUtil.randomRegexp(random); + String reg = AutomatonTestUtil.randomRegexp(random()); if (VERBOSE) { System.out.println("TEST: regexp=" + reg); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java index 21b307601df..7c5feda6248 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java @@ -100,7 +100,7 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase { public void testGetScores() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); writer.commit(); IndexReader ir = writer.getReader(); writer.close(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java b/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java index b41b1c72b10..4953c39188e 100755 --- a/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java @@ -48,7 +48,7 @@ public class TestScorerPerf extends LuceneTestCase { // Create a dummy index with nothing in it. // This could possibly fail if Lucene starts checking for docid ranges... d = newDirectory(); - IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))); iw.addDocument(new Document()); iw.close(); r = IndexReader.open(d); @@ -64,11 +64,11 @@ public class TestScorerPerf extends LuceneTestCase { terms[i] = new Term("f",Character.toString((char)('A'+i))); } - IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); for (int i=0; i=termsInIndex) tnum=termflag.nextClearBit(0); termflag.set(tnum); @@ -259,17 +259,17 @@ public class TestScorerPerf extends LuceneTestCase { int ret=0; long nMatches=0; for (int i=0; i=25) tnum=termflag.nextClearBit(0); termflag.set(tnum); @@ -299,10 +299,10 @@ public class TestScorerPerf extends LuceneTestCase { int ret=0; for (int i=0; i 200) { - Collections.shuffle(priorSearches, random); + Collections.shuffle(priorSearches, random()); priorSearches.subList(100, priorSearches.size()).clear(); } } @@ -272,7 +272,7 @@ public class TestShardSearching extends ShardSearchingTestBase { private PreviousSearchState assertSame(IndexSearcher mockSearcher, NodeState.ShardIndexSearcher shardSearcher, Query q, Sort sort, PreviousSearchState state) throws IOException { - int numHits = _TestUtil.nextInt(random, 1, 100); + int numHits = _TestUtil.nextInt(random(), 1, 100); if (state != null && state.searchAfterLocal == null) { // In addition to what we last searched: numHits += state.numHitsPaged; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java index e2021d4b325..2c155f00b51 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java +++ 
b/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java @@ -54,8 +54,8 @@ public class TestSimilarity extends LuceneTestCase { public void testSimilarity() throws Exception { Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(random(), store, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setSimilarity(new SimpleSimilarity())); Document d1 = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java index 4f52f03404e..6836569a345 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java @@ -46,8 +46,8 @@ public class TestSimilarityProvider extends LuceneTestCase { directory = newDirectory(); PerFieldSimilarityWrapper sim = new ExampleSimilarityProvider(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setSimilarity(sim); - RandomIndexWriter iw = new RandomIndexWriter(random, directory, iwc); + new MockAnalyzer(random())).setSimilarity(sim); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, iwc); Document doc = new Document(); Field field = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(field); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java index fc1782bc234..4c9d9e1a678 100755 --- a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java @@ -137,7 +137,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { query.setSlop(slop); Directory ramDir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, ramDir, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + RandomIndexWriter writer = new RandomIndexWriter(random(), ramDir, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); writer.addDocument(doc); IndexReader reader = writer.getReader(); @@ -227,13 +227,13 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { return false; } }); - QueryUtils.check(random, pq, searcher); + QueryUtils.check(random(), pq, searcher); } // LUCENE-3215 public void testSlopWithHoles() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir); FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setOmitNorms(true); Field f = new Field("lyrics", "", customType); @@ -270,7 +270,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { String document = "drug druggy drug drug drug"; Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newField("lyrics", document, new FieldType(TextField.TYPE_UNSTORED))); iw.addDocument(doc); @@ -323,7 +323,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir); Document 
doc = new Document(); doc.add(newField("lyrics", document, new FieldType(TextField.TYPE_UNSTORED))); iw.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java index 326f886e47a..41a981d7442 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java @@ -85,8 +85,8 @@ public class TestSloppyPhraseQuery2 extends SearchEquivalenceTestBase { Term t1 = randomTerm(); Term t2 = randomTerm(); Term t3 = randomTerm(); - int pos1 = 1 + random.nextInt(3); - int pos2 = pos1 + 1 + random.nextInt(3); + int pos1 = 1 + random().nextInt(3); + int pos2 = pos1 + 1 + random().nextInt(3); PhraseQuery q1 = new PhraseQuery(); q1.add(t1); q1.add(t2, pos1); @@ -155,8 +155,8 @@ public class TestSloppyPhraseQuery2 extends SearchEquivalenceTestBase { /** same as the above with posincr */ public void testRepetitiveIncreasingSloppiness3WithHoles() throws Exception { Term t = randomTerm(); - int pos1 = 1 + random.nextInt(3); - int pos2 = pos1 + 1 + random.nextInt(3); + int pos1 = 1 + random().nextInt(3); + int pos2 = pos1 + 1 + random().nextInt(3); PhraseQuery q1 = new PhraseQuery(); q1.add(t); q1.add(t, pos1); @@ -174,7 +174,7 @@ public class TestSloppyPhraseQuery2 extends SearchEquivalenceTestBase { /** MultiPhraseQuery~N ⊆ MultiPhraseQuery~N+1 */ public void testRandomIncreasingSloppiness() throws Exception { - long seed = random.nextLong(); + long seed = random().nextLong(); MultiPhraseQuery q1 = randomPhraseQuery(seed); MultiPhraseQuery q2 = randomPhraseQuery(seed); for (int i = 0; i < 10; i++) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSort.java b/lucene/core/src/test/org/apache/lucene/search/TestSort.java index b32a607dda3..df469992af0 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSort.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSort.java @@ -123,20 +123,20 @@ public class TestSort extends LuceneTestCase { throws IOException { Directory indexStore = newDirectory(); dirs.add(indexStore); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); final DocValues.Type stringDVType; if (dvStringSorted) { // Index sorted - stringDVType = random.nextBoolean() ? DocValues.Type.BYTES_VAR_SORTED : DocValues.Type.BYTES_FIXED_SORTED; + stringDVType = random().nextBoolean() ? DocValues.Type.BYTES_VAR_SORTED : DocValues.Type.BYTES_FIXED_SORTED; } else { // Index non-sorted - if (random.nextBoolean()) { + if (random().nextBoolean()) { // Fixed - stringDVType = random.nextBoolean() ? DocValues.Type.BYTES_FIXED_STRAIGHT : DocValues.Type.BYTES_FIXED_DEREF; + stringDVType = random().nextBoolean() ? DocValues.Type.BYTES_FIXED_STRAIGHT : DocValues.Type.BYTES_FIXED_DEREF; } else { // Var - stringDVType = random.nextBoolean() ? DocValues.Type.BYTES_VAR_STRAIGHT : DocValues.Type.BYTES_VAR_DEREF; + stringDVType = random().nextBoolean() ? 
DocValues.Type.BYTES_VAR_STRAIGHT : DocValues.Type.BYTES_VAR_DEREF; } } @@ -206,7 +206,7 @@ public class TestSort extends LuceneTestCase { dirs.add(indexStore); IndexWriter writer = new IndexWriter( indexStore, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMergePolicy(newLogMergePolicy(97)) ); FieldType onlyStored = new FieldType(); @@ -285,7 +285,7 @@ public class TestSort extends LuceneTestCase { public int getRandomNumber(final int low, final int high) { - int randInt = (Math.abs(random.nextInt()) % (high - low)) + low; + int randInt = (Math.abs(random().nextInt()) % (high - low)) + low; return randInt; } @@ -313,7 +313,7 @@ public class TestSort extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); - dvStringSorted = random.nextBoolean(); + dvStringSorted = random().nextBoolean(); full = getFullIndex(); searchX = getXIndex(); searchY = getYIndex(); @@ -408,7 +408,7 @@ public class TestSort extends LuceneTestCase { if (dvStringSorted) { // If you index as sorted source you can still sort by // value instead: - return random.nextBoolean() ? SortField.Type.STRING : SortField.Type.STRING_VAL; + return random().nextBoolean() ? SortField.Type.STRING : SortField.Type.STRING_VAL; } else { return SortField.Type.STRING_VAL; } @@ -511,7 +511,7 @@ public class TestSort extends LuceneTestCase { private void verifyStringSort(Sort sort) throws Exception { final IndexSearcher searcher = getFullStrings(); - final ScoreDoc[] result = searcher.search(new MatchAllDocsQuery(), null, _TestUtil.nextInt(random, 500, searcher.getIndexReader().maxDoc()), sort).scoreDocs; + final ScoreDoc[] result = searcher.search(new MatchAllDocsQuery(), null, _TestUtil.nextInt(random(), 500, searcher.getIndexReader().maxDoc()), sort).scoreDocs; StringBuilder buff = new StringBuilder(); int n = result.length; String last = null; @@ -801,7 +801,7 @@ public class TestSort extends LuceneTestCase { assertMatches (full, queryG, sort, "ZYXW"); // Do the same for a ParallelMultiSearcher - ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8)); + ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random(), 2, 8)); IndexSearcher parallelSearcher=new IndexSearcher (full.getIndexReader(), exec); sort.setSort (new SortField ("int", SortField.Type.INT), @@ -845,7 +845,7 @@ public class TestSort extends LuceneTestCase { // test a variety of sorts using a parallel multisearcher public void testParallelMultiSort() throws Exception { - ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8)); + ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random(), 2, 8)); IndexSearcher searcher = new IndexSearcher( new MultiReader(searchX.getIndexReader(), searchY.getIndexReader()), exec); @@ -1236,7 +1236,7 @@ public class TestSort extends LuceneTestCase { public void testEmptyStringVsNullStringSort() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); Document doc = new Document(); doc.add(newField("f", "", StringField.TYPE_UNSTORED)); doc.add(newField("t", "1", StringField.TYPE_UNSTORED)); @@ -1261,7 +1261,7 @@ public class TestSort extends LuceneTestCase { public void testLUCENE2142() throws IOException { Directory indexStore = newDirectory(); IndexWriter writer = new 
IndexWriter(indexStore, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for (int i=0; i<5; i++) { Document doc = new Document(); doc.add (new StringField ("string", "a"+i)); @@ -1283,7 +1283,7 @@ public class TestSort extends LuceneTestCase { public void testCountingCollector() throws Exception { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); for (int i=0; i<5; i++) { Document doc = new Document(); doc.add (new StringField ("string", "a"+i)); @@ -1333,6 +1333,7 @@ public class TestSort extends LuceneTestCase { } public void testRandomStringSort() throws Exception { + Random random = new Random(random().nextLong()); assumeTrue("cannot work with Lucene3x codec", defaultCodecSupportsDocValues()); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java index af8704d78ae..5a97398f7be 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java @@ -40,7 +40,7 @@ public class TestSubScorerFreqs extends LuceneTestCase { public static void makeIndex() throws Exception { dir = new RAMDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); // make sure we have more than one segment occationally int num = atLeast(31); for (int i = 0; i < num; i++) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java index fed281cd2f8..397389f815c 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java @@ -240,7 +240,7 @@ public class TestTermRangeQuery extends LuceneTestCase { } private void initializeIndex(String[] values) throws IOException { - initializeIndex(values, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + initializeIndex(values, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); } private void initializeIndex(String[] values, Analyzer analyzer) throws IOException { @@ -254,7 +254,7 @@ public class TestTermRangeQuery extends LuceneTestCase { // shouldnt create an analyzer for every doc? 
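// ---------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch.] Every hunk
// in this change makes the same mechanical substitution: the shared static
// `random` field of LuceneTestCase becomes a call to the `random()` method,
// which hands the current test thread its own seeded Random. A minimal,
// self-contained sketch of the resulting call-site shape (the class name is
// hypothetical; the helpers are the ones visible in the hunks above):

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class RandomMigrationSketchTest extends LuceneTestCase {
  public void testCallSiteShape() throws Exception {
    Directory dir = newDirectory();
    // before this patch: new MockAnalyzer(random) read the shared field;
    // after it: each use pulls the per-thread instance via random().
    RandomIndexWriter w = new RandomIndexWriter(random(), dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT,
            new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
    w.addDocument(new Document());
    w.close();
    dir.close();
  }
}
// ---------------------------------------------------------------------------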
private void addDoc(String content) throws IOException { - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND)); insertDoc(writer, content); writer.close(); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java index 4015c6ac4ad..1e01f3a6ebe 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java @@ -47,8 +47,8 @@ public class TestTermScorer extends LuceneTestCase { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMergePolicy(newLogMergePolicy()) .setSimilarity(new DefaultSimilarity())); for (int i = 0; i < values.length; i++) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java index 838ca96b1ce..e8cec9359ef 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java @@ -46,7 +46,7 @@ public class TestTermVectors extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy())); //writer.setUseCompoundFile(true); //writer.infoStream = System.out; for (int i = 0; i < 1000; i++) { @@ -107,7 +107,7 @@ public class TestTermVectors extends LuceneTestCase { public void testTermVectorsFieldOrder() throws IOException { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_STORED); ft.setStoreTermVectors(true); @@ -238,8 +238,8 @@ public class TestTermVectors extends LuceneTestCase { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)) + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)) .setOpenMode(OpenMode.CREATE) .setMergePolicy(newLogMergePolicy()) .setSimilarity(new DefaultSimilarity())); @@ -261,7 +261,7 @@ public class TestTermVectors extends LuceneTestCase { while (termsEnum.next() != null) { String text = termsEnum.term().utf8ToString(); - 
docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true); + docs = _TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true); while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int docId = docs.docID(); @@ -333,8 +333,8 @@ public class TestTermVectors extends LuceneTestCase { // Test only a few docs having vectors public void testRareVectors() throws IOException { - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)) + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)) .setOpenMode(OpenMode.CREATE)); if (VERBOSE) { System.out.println("TEST: now add non-vectors"); @@ -380,9 +380,9 @@ public class TestTermVectors extends LuceneTestCase { // In a single doc, for the same field, mix the term // vectors up public void testMixedVectrosVectors() throws IOException { - RandomIndexWriter writer = new RandomIndexWriter(random, directory, + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE)); + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE)); Document doc = new Document(); FieldType ft2 = new FieldType(TextField.TYPE_STORED); @@ -448,7 +448,7 @@ public class TestTermVectors extends LuceneTestCase { private IndexWriter createWriter(Directory dir) throws IOException { return new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMaxBufferedDocs(2)); + new MockAnalyzer(random())).setMaxBufferedDocs(2)); } private void createDir(Directory dir) throws IOException { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java index ddddfae383d..1e2695b2089 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java @@ -80,7 +80,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase { "blueberry pizza", }; directory = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); for (int i=0; i test2 - final IndexInput in = dir.openInput("test", newIOContext(random)); + final IndexInput in = dir.openInput("test", newIOContext(random())); - out = dir.createOutput("test2", newIOContext(random)); + out = dir.createOutput("test2", newIOContext(random())); upto = 0; while (upto < size) { - if (random.nextBoolean()) { + if (random().nextBoolean()) { out.writeByte(in.readByte()); upto++; } else { final int chunk = Math.min( - _TestUtil.nextInt(random, 1, bytes.length), size - upto); + _TestUtil.nextInt(random(), 1, bytes.length), size - upto); out.copyBytes(in, chunk); upto += chunk; } @@ -81,16 +81,16 @@ public class TestCopyBytes extends LuceneTestCase { in.close(); // verify - IndexInput in2 = dir.openInput("test2", newIOContext(random)); + 
IndexInput in2 = dir.openInput("test2", newIOContext(random()));
     upto = 0;
     while (upto < size) {
-      if (random.nextBoolean()) {
+      if (random().nextBoolean()) {
         final byte v = in2.readByte();
         assertEquals(value(upto), v);
         upto++;
       } else {
         final int limit = Math.min(
-          _TestUtil.nextInt(random, 1, bytes.length), size - upto);
+          _TestUtil.nextInt(random(), 1, bytes.length), size - upto);
         in2.readBytes(bytes, 0, limit);
         for (int byteIdx = 0; byteIdx < limit; byteIdx++) {
           assertEquals(value(upto), bytes[byteIdx]);
@@ -109,9 +109,9 @@ public class TestCopyBytes extends LuceneTestCase {
   // LUCENE-3541
   public void testCopyBytesWithThreads() throws Exception {
-    int datalen = _TestUtil.nextInt(random, 101, 10000);
+    int datalen = _TestUtil.nextInt(random(), 101, 10000);
     byte data[] = new byte[datalen];
-    random.nextBytes(data);
+    random().nextBytes(data);
     Directory d = newDirectory();
     IndexOutput output = d.createOutput("data", IOContext.DEFAULT);
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
index e17cf53f75b..16e4689217a 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
@@ -31,7 +31,7 @@ public class TestDirectory extends LuceneTestCase {
     for (Directory dir : dirs) {
       dir.close();
       try {
-        dir.createOutput("test", newIOContext(random));
+        dir.createOutput("test", newIOContext(random()));
         fail("did not hit expected exception");
       } catch (AlreadyClosedException ace) {
       }
@@ -56,7 +56,7 @@ public class TestDirectory extends LuceneTestCase {
       dir.ensureOpen();
       String fname = "foo." + i;
       String lockname = "foo" + i + ".lck";
-      IndexOutput out = dir.createOutput(fname, newIOContext(random));
+      IndexOutput out = dir.createOutput(fname, newIOContext(random()));
       out.writeByte((byte)i);
       out.close();
@@ -70,7 +70,7 @@ public class TestDirectory extends LuceneTestCase {
       // closed and will cause a failure to delete the file.
if (d2 instanceof MMapDirectory) continue; - IndexInput input = d2.openInput(fname, newIOContext(random)); + IndexInput input = d2.openInput(fname, newIOContext(random())); assertEquals((byte)i, input.readByte()); input.close(); } @@ -141,7 +141,7 @@ public class TestDirectory extends LuceneTestCase { private void checkDirectoryFilter(Directory dir) throws IOException { String name = "file"; try { - dir.createOutput(name, newIOContext(random)).close(); + dir.createOutput(name, newIOContext(random())).close(); assertTrue(dir.fileExists(name)); assertTrue(Arrays.asList(dir.listAll()).contains(name)); } finally { @@ -156,7 +156,7 @@ public class TestDirectory extends LuceneTestCase { path.mkdirs(); new File(path, "subdir").mkdirs(); Directory fsDir = new SimpleFSDirectory(path, null); - assertEquals(0, new RAMDirectory(fsDir, newIOContext(random)).listAll().length); + assertEquals(0, new RAMDirectory(fsDir, newIOContext(random())).listAll().length); } finally { _TestUtil.rmDir(path); } @@ -167,7 +167,7 @@ public class TestDirectory extends LuceneTestCase { File path = _TestUtil.getTempDir("testnotdir"); Directory fsDir = new SimpleFSDirectory(path, null); try { - IndexOutput out = fsDir.createOutput("afile", newIOContext(random)); + IndexOutput out = fsDir.createOutput("afile", newIOContext(random())); out.close(); assertTrue(fsDir.fileExists("afile")); try { diff --git a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java index 15d0f6fc593..d11864830a0 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java @@ -43,16 +43,16 @@ public class TestFileSwitchDirectory extends LuceneTestCase { fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_EXTENSION); fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION); - MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); + MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random(), new RAMDirectory()); primaryDir.setCheckIndexOnClose(false); // only part of an index - MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); + MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(random(), new RAMDirectory()); secondaryDir.setCheckIndexOnClose(false); // only part of an index FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true); // for now we wire Lucene40Codec because we rely upon its specific impl IndexWriter writer = new IndexWriter( fsd, - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). setMergePolicy(newLogMergePolicy(false)).setCodec(Codec.forName("Lucene40")) ); TestIndexWriterReader.createIndexNoClose(true, "ram", writer); @@ -87,7 +87,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { Directory a = new SimpleFSDirectory(_TestUtil.getTempDir("foo")); Directory b = new SimpleFSDirectory(_TestUtil.getTempDir("bar")); FileSwitchDirectory switchDir = new FileSwitchDirectory(primaryExtensions, a, b, true); - return new MockDirectoryWrapper(random, switchDir); + return new MockDirectoryWrapper(random(), switchDir); } // LUCENE-3380 -- make sure we get exception if the directory really does not exist. 
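// ---------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch.] The
// surrounding TestFileSwitchDirectory hunks pass newIOContext(random()) on
// every open/create so each I/O operation sees a freshly randomized
// IOContext. A minimal sketch of the extension-based routing those tests
// exercise (the class name is hypothetical; the constructor shape is the one
// shown in the hunks):

import java.util.Collections;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;

public class SwitchRoutingSketchTest extends LuceneTestCase {
  public void testRoutesByExtension() throws Exception {
    Directory primary = new RAMDirectory();
    Directory secondary = new RAMDirectory();
    // files whose extension is in the primary set land in `primary`,
    // everything else in `secondary`
    FileSwitchDirectory fsd = new FileSwitchDirectory(
        Collections.singleton("cfs"), primary, secondary, true);
    IndexOutput out = fsd.createOutput("x.cfs", newIOContext(random()));
    out.close();
    assertTrue(primary.fileExists("x.cfs"));
    assertFalse(secondary.fileExists("x.cfs"));
    fsd.close();
  }
}
// ---------------------------------------------------------------------------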
@@ -107,7 +107,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { Directory dir = newFSSwitchDirectory(Collections.emptySet()); String name = "file"; try { - dir.createOutput(name, newIOContext(random)).close(); + dir.createOutput(name, newIOContext(random())).close(); assertTrue(dir.fileExists(name)); assertTrue(Arrays.asList(dir.listAll()).contains(name)); } finally { @@ -118,12 +118,12 @@ public class TestFileSwitchDirectory extends LuceneTestCase { // LUCENE-3380 test that delegate compound files correctly. public void testCompoundFileAppendTwice() throws IOException { Directory newDir = newFSSwitchDirectory(Collections.singleton("cfs")); - CompoundFileDirectory csw = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(random), true); + CompoundFileDirectory csw = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(random()), true); createSequenceFile(newDir, "d1", (byte) 0, 15); - IndexOutput out = csw.createOutput("d.xyz", newIOContext(random)); + IndexOutput out = csw.createOutput("d.xyz", newIOContext(random())); out.writeInt(0); try { - newDir.copy(csw, "d1", "d1", newIOContext(random)); + newDir.copy(csw, "d1", "d1", newIOContext(random())); fail("file does already exist"); } catch (IllegalArgumentException e) { // @@ -134,7 +134,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { csw.close(); - CompoundFileDirectory cfr = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(random), false); + CompoundFileDirectory cfr = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(random()), false); assertEquals(1, cfr.listAll().length); assertEquals("d.xyz", cfr.listAll()[0]); cfr.close(); @@ -146,7 +146,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { * computed as start + offset where offset is the number of the byte. */ private void createSequenceFile(Directory dir, String name, byte start, int size) throws IOException { - IndexOutput os = dir.createOutput(name, newIOContext(random)); + IndexOutput os = dir.createOutput(name, newIOContext(random())); for (int i=0; i < size; i++) { os.writeByte(start); start ++; diff --git a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java index c41ca6502a4..277dd21dc5d 100755 --- a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java @@ -43,14 +43,14 @@ public class TestLockFactory extends LuceneTestCase { // methods are called at the right time, locks are created, etc. 
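// ---------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch.] The
// TestLockFactory hunks below verify writer locking; the essence is that a
// second IndexWriter on the same Directory must fail unless locking is
// disabled. A minimal sketch under the same API (class name hypothetical):

import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;

public class SecondWriterSketchTest extends LuceneTestCase {
  public void testSecondWriterIsLockedOut() throws Exception {
    Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory());
    IndexWriter first = new IndexWriter(dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    try {
      // the default SingleInstanceLockFactory must reject a second writer
      new IndexWriter(dir,
          new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
      fail("expected the write lock to be held");
    } catch (IOException expected) {
      // LockObtainFailedException extends IOException
    }
    first.close();
    dir.close();
  }
}
// ---------------------------------------------------------------------------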
public void testCustomLockFactory() throws IOException { - Directory dir = new MockDirectoryWrapper(random, new RAMDirectory()); + Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory()); MockLockFactory lf = new MockLockFactory(); dir.setLockFactory(lf); // Lock prefix should have been set: assertTrue("lock prefix was not set by the RAMDirectory", lf.lockPrefixSet); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); // add 100 documents (so that commit lock is used) for (int i = 0; i < 100; i++) { @@ -76,19 +76,19 @@ public class TestLockFactory extends LuceneTestCase { // exceptions raised: // Verify: NoLockFactory allows two IndexWriters public void testRAMDirectoryNoLocking() throws IOException { - Directory dir = new MockDirectoryWrapper(random, new RAMDirectory()); + Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory()); dir.setLockFactory(NoLockFactory.getNoLockFactory()); assertTrue("RAMDirectory.setLockFactory did not take", NoLockFactory.class.isInstance(dir.getLockFactory())); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); writer.commit(); // required so the second open succeed // Create a 2nd IndexWriter. This is normally not allowed but it should run through since we're not // using any locks: IndexWriter writer2 = null; try { - writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); } catch (Exception e) { e.printStackTrace(System.out); fail("Should not have hit an IOException with no locking"); @@ -108,12 +108,12 @@ public class TestLockFactory extends LuceneTestCase { assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.getLockFactory(), SingleInstanceLockFactory.class.isInstance(dir.getLockFactory())); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); // Create a 2nd IndexWriter. 
This should fail: IndexWriter writer2 = null; try { - writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND)); fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory"); } catch (IOException e) { } @@ -151,7 +151,7 @@ public class TestLockFactory extends LuceneTestCase { Directory dir = newFSDirectory(indexDir, lockFactory); // First create a 1 doc index: - IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); addDoc(w); w.close(); @@ -289,7 +289,7 @@ public class TestLockFactory extends LuceneTestCase { IndexWriter writer = null; for(int i=0;i= minTargetSize); } @@ -118,9 +118,9 @@ public class TestArrayUtil extends LuceneTestCase { } private Integer[] createRandomArray(int maxSize) { - final Integer[] a = new Integer[random.nextInt(maxSize) + 1]; + final Integer[] a = new Integer[random().nextInt(maxSize) + 1]; for (int i = 0; i < a.length; i++) { - a[i] = Integer.valueOf(random.nextInt(a.length)); + a[i] = Integer.valueOf(random().nextInt(a.length)); } return a; } @@ -146,9 +146,9 @@ public class TestArrayUtil extends LuceneTestCase { } private Integer[] createSparseRandomArray(int maxSize) { - final Integer[] a = new Integer[random.nextInt(maxSize) + 1]; + final Integer[] a = new Integer[random().nextInt(maxSize) + 1]; for (int i = 0; i < a.length; i++) { - a[i] = Integer.valueOf(random.nextInt(2)); + a[i] = Integer.valueOf(random().nextInt(2)); } return a; } @@ -228,8 +228,8 @@ public class TestArrayUtil extends LuceneTestCase { // so they should always be in order after sorting. // The other half has defined order, but no (-1) value (they should appear after // all above, when sorted). - final boolean equal = random.nextBoolean(); - items[i] = new Item(equal ? (i+1) : -1, equal ? 0 : (random.nextInt(1000)+1)); + final boolean equal = random().nextBoolean(); + items[i] = new Item(equal ? (i+1) : -1, equal ? 
0 : (random().nextInt(1000)+1)); } if (VERBOSE) System.out.println("Before: " + Arrays.toString(items)); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java b/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java index 26ea474d4e9..42bbea4b677 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java @@ -35,18 +35,18 @@ public class TestByteBlockPool extends LuceneTestCase { final int numValues = atLeast(100); BytesRef ref = new BytesRef(); for (int i = 0; i < numValues; i++) { - final String value = _TestUtil.randomRealisticUnicodeString(random, + final String value = _TestUtil.randomRealisticUnicodeString(random(), maxLength); list.add(value); ref.copyChars(value); pool.copy(ref); } RAMDirectory dir = new RAMDirectory(); - IndexOutput stream = dir.createOutput("foo.txt", newIOContext(random)); + IndexOutput stream = dir.createOutput("foo.txt", newIOContext(random())); pool.writePool(stream); stream.flush(); stream.close(); - IndexInput input = dir.openInput("foo.txt", newIOContext(random)); + IndexInput input = dir.openInput("foo.txt", newIOContext(random())); assertEquals(pool.byteOffset + pool.byteUpto, stream.length()); BytesRef expected = new BytesRef(); BytesRef actual = new BytesRef(); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java b/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java index cb5bb535c87..ced6ae05349 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java @@ -40,7 +40,7 @@ public class TestBytesRef extends LuceneTestCase { public void testFromChars() { for (int i = 0; i < 100; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = _TestUtil.randomUnicodeString(random()); String s2 = new BytesRef(s).utf8ToString(); assertEquals(s, s2); } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java b/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java index 1e0ecd99013..359d58da7c3 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java @@ -49,13 +49,13 @@ public class TestBytesRefHash extends LuceneTestCase { } private ByteBlockPool newPool(){ - return random.nextBoolean() && pool != null ? pool - : new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, random.nextInt(25))); + return random().nextBoolean() && pool != null ? pool + : new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, random().nextInt(25))); } private BytesRefHash newHash(ByteBlockPool blockPool) { - final int initSize = 2 << 1 + random.nextInt(5); - return random.nextBoolean() ? new BytesRefHash(blockPool) : new BytesRefHash( + final int initSize = 2 << 1 + random().nextInt(5); + return random().nextBoolean() ? 
new BytesRefHash(blockPool) : new BytesRefHash( blockPool, initSize, new BytesRefHash.DirectBytesStartArray(initSize)); } @@ -67,11 +67,11 @@ public class TestBytesRefHash extends LuceneTestCase { BytesRef ref = new BytesRef(); int num = atLeast(2); for (int j = 0; j < num; j++) { - final int mod = 1+random.nextInt(39); + final int mod = 1+random().nextInt(39); for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(random(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); @@ -105,7 +105,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(random(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); @@ -144,7 +144,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < size; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(random(), 1000); } while (str.length() == 0); ref.copyChars(str); final int key = hash.add(ref); @@ -184,7 +184,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(random(), 1000); } while (str.length() == 0); ref.copyChars(str); hash.add(ref); @@ -223,7 +223,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(random(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); @@ -251,9 +251,9 @@ public class TestBytesRefHash extends LuceneTestCase { @Test(expected = MaxBytesLengthExceededException.class) public void testLargeValue() { - int[] sizes = new int[] { random.nextInt(5), - ByteBlockPool.BYTE_BLOCK_SIZE - 33 + random.nextInt(31), - ByteBlockPool.BYTE_BLOCK_SIZE - 1 + random.nextInt(37) }; + int[] sizes = new int[] { random().nextInt(5), + ByteBlockPool.BYTE_BLOCK_SIZE - 33 + random().nextInt(31), + ByteBlockPool.BYTE_BLOCK_SIZE - 1 + random().nextInt(37) }; BytesRef ref = new BytesRef(); for (int i = 0; i < sizes.length; i++) { ref.bytes = new byte[sizes[i]]; @@ -286,7 +286,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(random(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java b/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java index f6d25ceb56f..365ae451553 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java @@ -26,7 +26,7 @@ public class TestCharsRef extends LuceneTestCase { CharsRef utf16[] = new CharsRef[numStrings]; for (int i = 0; i < numStrings; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = _TestUtil.randomUnicodeString(random()); utf8[i] = new BytesRef(s); utf16[i] = new CharsRef(s); } @@ -44,8 +44,8 @@ public class TestCharsRef extends LuceneTestCase { StringBuilder builder 
= new StringBuilder(); int numStrings = atLeast(10); for (int i = 0; i < numStrings; i++) { - char[] charArray = _TestUtil.randomRealisticUnicodeString(random, 1, 100).toCharArray(); - int offset = random.nextInt(charArray.length); + char[] charArray = _TestUtil.randomRealisticUnicodeString(random(), 1, 100).toCharArray(); + int offset = random().nextInt(charArray.length); int length = charArray.length - offset; builder.append(charArray, offset, length); ref.append(charArray, offset, length); @@ -58,8 +58,8 @@ public class TestCharsRef extends LuceneTestCase { int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { CharsRef ref = new CharsRef(); - char[] charArray = _TestUtil.randomRealisticUnicodeString(random, 1, 100).toCharArray(); - int offset = random.nextInt(charArray.length); + char[] charArray = _TestUtil.randomRealisticUnicodeString(random(), 1, 100).toCharArray(); + int offset = random().nextInt(charArray.length); int length = charArray.length - offset; String str = new String(charArray, offset, length); ref.copyChars(charArray, offset, length); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java b/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java index fb9235c2122..52095e59d93 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java @@ -26,9 +26,9 @@ import java.util.List; public class TestCollectionUtil extends LuceneTestCase { private List createRandomList(int maxSize) { - final Integer[] a = new Integer[random.nextInt(maxSize) + 1]; + final Integer[] a = new Integer[random().nextInt(maxSize) + 1]; for (int i = 0; i < a.length; i++) { - a[i] = Integer.valueOf(random.nextInt(a.length)); + a[i] = Integer.valueOf(random().nextInt(a.length)); } return Arrays.asList(a); } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java b/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java index 033427ac9cb..fb9d668ec04 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java @@ -46,8 +46,8 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase { dirA = newDirectory(); dirB = newDirectory(); - IndexWriter wA = new IndexWriter(dirA, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); - IndexWriter wB = new IndexWriter(dirB, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter wA = new IndexWriter(dirA, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); + IndexWriter wB = new IndexWriter(dirB, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); long theLong = Long.MAX_VALUE; double theDouble = Double.MAX_VALUE; diff --git a/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java b/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java index a1ffbde2bdb..25581f7a803 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java @@ -43,7 +43,7 @@ public class TestFixedBitSet extends LuceneTestCase { } void doPrevSetBit(BitSet a, FixedBitSet b) { - int aa = a.size() + random.nextInt(100); + int aa = a.size() + random().nextInt(100); int bb = aa; do { // aa = a.prevSetBit(aa-1); @@ -75,7 +75,7 @@ public class TestFixedBitSet extends LuceneTestCase { DocIdSetIterator 
iterator = b.iterator(); do { aa = a.nextSetBit(aa+1); - bb = (bb < b.length() && random.nextBoolean()) ? iterator.nextDoc() : iterator.advance(bb + 1); + bb = (bb < b.length() && random().nextBoolean()) ? iterator.nextDoc() : iterator.advance(bb + 1); assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb); } while (aa>=0); } @@ -85,7 +85,7 @@ public class TestFixedBitSet extends LuceneTestCase { DocIdSetIterator iterator = b.iterator(); do { aa = a.nextSetBit(aa+1); - bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); + bb = random().nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb); } while (aa>=0); } @@ -95,29 +95,29 @@ public class TestFixedBitSet extends LuceneTestCase { FixedBitSet b0=null; for (int i=0; i0) { - int nOper = random.nextInt(sz); + int nOper = random().nextInt(sz); for (int j=0; j jdk = Collections.newSetFromMap( new IdentityHashMap()); RamUsageEstimator.IdentityHashSet us = new RamUsageEstimator.IdentityHashSet(); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java b/lucene/core/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java index 8ef5b911e56..e94cefbf18b 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java @@ -69,18 +69,18 @@ public class TestIndexableBinaryStringTools extends LuceneTestCase { char[] encoded2 = new char[MAX_RANDOM_BINARY_LENGTH * 10]; for (int testNum = 0; testNum < NUM_RANDOM_TESTS; ++testNum) { - int numBytes1 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1 + int numBytes1 = random().nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1 for (int byteNum = 0; byteNum < numBytes1; ++byteNum) { - int randomInt = random.nextInt(0x100); + int randomInt = random().nextInt(0x100); originalArray1[byteNum] = (byte) randomInt; originalString1[byteNum] = (char) randomInt; } - int numBytes2 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1 + int numBytes2 = random().nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1 for (int byteNum = 0; byteNum < numBytes2; ++byteNum) { - int randomInt = random.nextInt(0x100); + int randomInt = random().nextInt(0x100); original2[byteNum] = (byte) randomInt; originalString2[byteNum] = (char) randomInt; } @@ -169,10 +169,10 @@ public class TestIndexableBinaryStringTools extends LuceneTestCase { char[] encoded = new char[MAX_RANDOM_BINARY_LENGTH * 10]; byte[] decoded = new byte[MAX_RANDOM_BINARY_LENGTH]; for (int testNum = 0; testNum < NUM_RANDOM_TESTS; ++testNum) { - int numBytes = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1 + int numBytes = random().nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1 for (int byteNum = 0; byteNum < numBytes; ++byteNum) { - binary[byteNum] = (byte) random.nextInt(0x100); + binary[byteNum] = (byte) random().nextInt(0x100); } int encodedLen = IndexableBinaryStringTools.getEncodedLength(binary, 0, diff --git a/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java b/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java index b07e60e03bf..a34855aa6b9 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java @@ -347,7 +347,7 @@ public class TestNumericUtils extends LuceneTestCase { public void testRandomSplit() throws Exception { long num 
= (long) atLeast(10); for (long i=0; i < num; i++) { - executeOneRandomSplit(random); + executeOneRandomSplit(random()); } } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java b/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java index 47b50b6eecd..c1a20244eb5 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java @@ -65,7 +65,7 @@ public class TestOpenBitSet extends LuceneTestCase { } void doPrevSetBit(BitSet a, OpenBitSet b) { - int aa = a.size() + random.nextInt(100); + int aa = a.size() + random().nextInt(100); int bb = aa; do { // aa = a.prevSetBit(aa-1); @@ -79,7 +79,7 @@ public class TestOpenBitSet extends LuceneTestCase { } void doPrevSetBitLong(BitSet a, OpenBitSet b) { - int aa = a.size() + random.nextInt(100); + int aa = a.size() + random().nextInt(100); int bb = aa; do { // aa = a.prevSetBit(aa-1); @@ -103,7 +103,7 @@ public class TestOpenBitSet extends LuceneTestCase { OpenBitSetIterator iterator = new OpenBitSetIterator(b); do { aa = a.nextSetBit(aa+1); - bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); + bb = random().nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb); } while (aa>=0); } @@ -113,7 +113,7 @@ public class TestOpenBitSet extends LuceneTestCase { OpenBitSetIterator iterator = new OpenBitSetIterator(b); do { aa = a.nextSetBit(aa+1); - bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); + bb = random().nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb); } while (aa>=0); } @@ -123,33 +123,33 @@ public class TestOpenBitSet extends LuceneTestCase { OpenBitSet b0=null; for (int i=0; i0) { - int nOper = random.nextInt(sz); + int nOper = random().nextInt(sz); for (int j=0; j>1)+1); + fromIndex = random().nextInt(sz+80); + toIndex = fromIndex + random().nextInt((sz>>1)+1); BitSet aa = (BitSet)a.clone(); aa.flip(fromIndex,toIndex); OpenBitSet bb = b.clone(); bb.flip(fromIndex,toIndex); doIterate(aa,bb, mode); // a problem here is from flip or doIterate - fromIndex = random.nextInt(sz+80); - toIndex = fromIndex + random.nextInt((sz>>1)+1); + fromIndex = random().nextInt(sz+80); + toIndex = fromIndex + random().nextInt((sz>>1)+1); aa = (BitSet)a.clone(); aa.clear(fromIndex,toIndex); bb = b.clone(); bb.clear(fromIndex,toIndex); @@ -198,8 +198,8 @@ public class TestOpenBitSet extends LuceneTestCase { doPrevSetBit(aa,bb); doPrevSetBitLong(aa,bb); - fromIndex = random.nextInt(sz+80); - toIndex = fromIndex + random.nextInt((sz>>1)+1); + fromIndex = random().nextInt(sz+80); + toIndex = fromIndex + random().nextInt((sz>>1)+1); aa = (BitSet)a.clone(); aa.set(fromIndex,toIndex); bb = b.clone(); bb.set(fromIndex,toIndex); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java b/lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java index 01d065e6075..d308facd9d3 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java @@ -17,9 +17,7 @@ package org.apache.lucene.util; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; +import java.util.*; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; @@ -27,21 +25,22 @@ import org.apache.lucene.store.DataOutput; public class TestPagedBytes 
extends LuceneTestCase { public void testDataInputOutput() throws Exception { + Random random = random(); for(int iter=0;iter<5*RANDOM_MULTIPLIER;iter++) { final int blockBits = _TestUtil.nextInt(random, 1, 20); final int blockSize = 1 << blockBits; final PagedBytes p = new PagedBytes(blockBits); final DataOutput out = p.getDataOutput(); - final int numBytes = random.nextInt(10000000); + final int numBytes = random().nextInt(10000000); final byte[] answer = new byte[numBytes]; - random.nextBytes(answer); + random().nextBytes(answer); int written = 0; while(written < numBytes) { - if (random.nextInt(10) == 7) { + if (random().nextInt(10) == 7) { out.writeByte(answer[written++]); } else { - int chunk = Math.min(random.nextInt(1000), numBytes - written); + int chunk = Math.min(random().nextInt(1000), numBytes - written); out.writeBytes(answer, written, chunk); written += chunk; } @@ -54,10 +53,10 @@ public class TestPagedBytes extends LuceneTestCase { final byte[] verify = new byte[numBytes]; int read = 0; while(read < numBytes) { - if (random.nextInt(10) == 7) { + if (random().nextInt(10) == 7) { verify[read++] = in.readByte(); } else { - int chunk = Math.min(random.nextInt(1000), numBytes - read); + int chunk = Math.min(random().nextInt(1000), numBytes - read); in.readBytes(verify, read, chunk); read += chunk; } @@ -77,6 +76,7 @@ public class TestPagedBytes extends LuceneTestCase { } public void testLengthPrefix() throws Exception { + Random random = random(); for(int iter=0;iter<5*RANDOM_MULTIPLIER;iter++) { final int blockBits = _TestUtil.nextInt(random, 2, 20); final int blockSize = 1 << blockBits; @@ -113,6 +113,7 @@ public class TestPagedBytes extends LuceneTestCase { // sure if caller writes their own prefix followed by the // bytes, it still works: public void testLengthPrefixAcrossTwoBlocks() throws Exception { + Random random = random(); final PagedBytes p = new PagedBytes(10); final DataOutput out = p.getDataOutput(); final byte[] bytes1 = new byte[1000]; diff --git a/lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java b/lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java index d36e1d1d207..7ff3da17ce9 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java @@ -33,7 +33,7 @@ public class TestPriorityQueue extends LuceneTestCase { } public void testPQ() throws Exception { - testPQ(atLeast(10000), random); + testPQ(atLeast(10000), random()); } public static void testPQ(int count, Random gen) { diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java b/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java index a19c5a70318..aff1ea074c4 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java @@ -44,7 +44,7 @@ public class TestRamUsageEstimator extends LuceneTestCase { } public void testStaticOverloads() { - Random rnd = random; + Random rnd = random(); { byte[] array = new byte[rnd.nextInt(1024)]; assertEquals(sizeOf(array), sizeOf((Object) array)); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java index 2425cf19480..21672c58bc6 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java +++ 
b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java @@ -38,8 +38,8 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { } private RecyclingByteBlockAllocator newAllocator() { - return new RecyclingByteBlockAllocator(1 << (2 + random.nextInt(15)), - random.nextInt(97), new AtomicLong()); + return new RecyclingByteBlockAllocator(1 << (2 + random().nextInt(15)), + random().nextInt(97), new AtomicLong()); } @Test @@ -74,7 +74,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { int numIters = atLeast(97); for (int i = 0; i < numIters; i++) { - int num = 1 + random.nextInt(39); + int num = 1 + random().nextInt(39); for (int j = 0; j < num; j++) { block = allocator.getByteBlock(); assertNotNull(block); @@ -84,8 +84,8 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { .bytesUsed()); } byte[][] array = allocated.toArray(new byte[0][]); - int begin = random.nextInt(array.length); - int end = begin + random.nextInt(array.length - begin); + int begin = random().nextInt(array.length); + int end = begin + random().nextInt(array.length - begin); List selected = new ArrayList(); for (int j = begin; j < end; j++) { selected.add(array[j]); @@ -111,7 +111,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { int numIters = atLeast(97); for (int i = 0; i < numIters; i++) { - int num = 1 + random.nextInt(39); + int num = 1 + random().nextInt(39); for (int j = 0; j < num; j++) { block = allocator.getByteBlock(); freeButAllocated = Math.max(0, freeButAllocated - 1); @@ -123,8 +123,8 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { } byte[][] array = allocated.toArray(new byte[0][]); - int begin = random.nextInt(array.length); - int end = begin + random.nextInt(array.length - begin); + int begin = random().nextInt(array.length); + int end = begin + random().nextInt(array.length - begin); for (int j = begin; j < end; j++) { byte[] b = array[j]; assertTrue(allocated.remove(b)); @@ -135,7 +135,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { } // randomly free blocks int numFreeBlocks = allocator.numBufferedBlocks(); - int freeBlocks = allocator.freeBlocks(random.nextInt(7 + allocator + int freeBlocks = allocator.freeBlocks(random().nextInt(7 + allocator .maxBufferedBlocks())); assertEquals(allocator.numBufferedBlocks(), numFreeBlocks - freeBlocks); } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRollingBuffer.java b/lucene/core/src/test/org/apache/lucene/util/TestRollingBuffer.java index 8c6d1355b07..9c4d1b92e91 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestRollingBuffer.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestRollingBuffer.java @@ -1,5 +1,7 @@ package org.apache.lucene.util; +import java.util.Random; + /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -45,6 +47,7 @@ public class TestRollingBuffer extends LuceneTestCase { final int maxPos = atLeast(10000); final FixedBitSet posSet = new FixedBitSet(maxPos + 1000); int posUpto = 0; + Random random = random(); while (freeBeforePos < maxPos) { if (random.nextInt(4) == 1) { final int limit = rarely() ? 
1000 : 20; diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRollingCharBuffer.java b/lucene/core/src/test/org/apache/lucene/util/TestRollingCharBuffer.java index 7a14378b254..5fb87566404 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestRollingCharBuffer.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestRollingCharBuffer.java @@ -18,6 +18,7 @@ package org.apache.lucene.util; */ import java.io.StringReader; +import java.util.Random; public class TestRollingCharBuffer extends LuceneTestCase { @@ -26,6 +27,7 @@ public class TestRollingCharBuffer extends LuceneTestCase { RollingCharBuffer buffer = new RollingCharBuffer(); + Random random = random(); for(int iter=0;iter a = new HashSet(initSz); SentinelIntSet b = new SentinelIntSet(initSz, -1); for (int j=0; j set = new SetOnce(); SetOnceThread[] threads = new SetOnceThread[10]; for (int i = 0; i < threads.length; i++) { - threads[i] = new SetOnceThread(random); + threads[i] = new SetOnceThread(random()); threads[i].setName("t-" + (i+1)); threads[i].set = set; } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java b/lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java index 2ee03c6bc25..7ffb714b21c 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java @@ -114,7 +114,7 @@ public class TestSmallFloat extends LuceneTestCase { // up iterations for more exhaustive test after changing something int num = atLeast(100000); for (int i = 0; i < num; i++) { - float f = Float.intBitsToFloat(random.nextInt()); + float f = Float.intBitsToFloat(random().nextInt()); if (Float.isNaN(f)) continue; // skip NaN byte b1 = orig_floatToByte(f); byte b2 = SmallFloat.floatToByte(f,3,15); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java b/lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java index ddbb5403b4e..95aaeb57c6e 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java @@ -91,13 +91,13 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase { public void testRollback() throws Exception { // tests that rollback is called if failure occurs at any stage - int numObjects = random.nextInt(8) + 3; // between [3, 10] + int numObjects = random().nextInt(8) + 3; // between [3, 10] TwoPhaseCommitImpl[] objects = new TwoPhaseCommitImpl[numObjects]; for (int i = 0; i < objects.length; i++) { - boolean failOnPrepare = random.nextBoolean(); + boolean failOnPrepare = random().nextBoolean(); // we should not hit failures on commit usually - boolean failOnCommit = random.nextDouble() < 0.05; - boolean railOnRollback = random.nextBoolean(); + boolean failOnCommit = random().nextDouble() < 0.05; + boolean railOnRollback = random().nextBoolean(); objects[i] = new TwoPhaseCommitImpl(failOnPrepare, failOnCommit, railOnRollback); } @@ -138,11 +138,11 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase { } public void testNullTPCs() throws Exception { - int numObjects = random.nextInt(4) + 3; // between [3, 6] + int numObjects = random().nextInt(4) + 3; // between [3, 6] TwoPhaseCommit[] tpcs = new TwoPhaseCommit[numObjects]; boolean setNull = false; for (int i = 0; i < tpcs.length; i++) { - boolean isNull = random.nextDouble() < 0.3; + boolean isNull = random().nextDouble() < 0.3; if (isNull) { setNull = true; tpcs[i] = null; @@ -153,7 +153,7 @@ public 
class TestTwoPhaseCommitTool extends LuceneTestCase { if (!setNull) { // none of the TPCs were picked to be null, pick one at random - int idx = random.nextInt(numObjects); + int idx = random().nextInt(numObjects); tpcs[idx] = null; } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java b/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java index 32d39c091f3..e4389567e8d 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java @@ -90,7 +90,7 @@ public class TestUnicodeUtil extends LuceneTestCase { BytesRef utf8 = new BytesRef(20); int num = atLeast(50000); for (int i = 0; i < num; i++) { - final String s = _TestUtil.randomUnicodeString(random); + final String s = _TestUtil.randomUnicodeString(random()); UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8); assertEquals(s.codePointCount(0, s.length()), UnicodeUtil.codePointCount(utf8)); @@ -103,7 +103,7 @@ public class TestUnicodeUtil extends LuceneTestCase { int[] codePoints = new int[20]; int num = atLeast(50000); for (int i = 0; i < num; i++) { - final String s = _TestUtil.randomUnicodeString(random); + final String s = _TestUtil.randomUnicodeString(random()); UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8); UnicodeUtil.UTF8toUTF32(utf8, utf32); @@ -170,11 +170,11 @@ public class TestUnicodeUtil extends LuceneTestCase { public void testUTF8UTF16CharsRef() { int num = atLeast(3989); for (int i = 0; i < num; i++) { - String unicode = _TestUtil.randomRealisticUnicodeString(random); + String unicode = _TestUtil.randomRealisticUnicodeString(random()); BytesRef ref = new BytesRef(unicode); - char[] arr = new char[1 + random.nextInt(100)]; - int offset = random.nextInt(arr.length); - int len = random.nextInt(arr.length - offset); + char[] arr = new char[1 + random().nextInt(100)]; + int offset = random().nextInt(arr.length); + int len = random().nextInt(arr.length - offset); CharsRef cRef = new CharsRef(arr, offset, len); UnicodeUtil.UTF8toUTF16(ref, cRef); assertEquals(cRef.toString(), unicode); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java b/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java index 908e918aee0..2faf1c71720 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java @@ -127,7 +127,7 @@ public class TestWeakIdentityMap extends LuceneTestCase { try { for (int t = 0; t < threadCount; t++) { - final Random rnd = new Random(random.nextLong()); + final Random rnd = new Random(random().nextLong()); exec.execute(new Runnable() { public void run() { final int count = atLeast(rnd, 10000); diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java index 87c4efb2211..482f895bf01 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java @@ -94,7 +94,7 @@ public class TestBasicOperations extends LuceneTestCase { final int ITER2 = atLeast(100); for(int i=0;i(term, NO_OUTPUT)); } - FST fst = new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(0, 0, false); + FST fst = new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); assertNotNull(fst); assertEquals(22, fst.getNodeCount()); assertEquals(27, 
fst.getArcCount()); @@ -177,7 +177,7 @@ public class TestFSTs extends LuceneTestCase { for(int idx=0;idx(terms2[idx], (long) idx)); } - final FST fst = new FSTTester(random, dir, inputMode, pairs, outputs, true).doTest(0, 0, false); + final FST fst = new FSTTester(random(), dir, inputMode, pairs, outputs, true).doTest(0, 0, false); assertNotNull(fst); assertEquals(22, fst.getNodeCount()); assertEquals(27, fst.getArcCount()); @@ -189,10 +189,10 @@ public class TestFSTs extends LuceneTestCase { final BytesRef NO_OUTPUT = outputs.getNoOutput(); final List> pairs = new ArrayList>(terms2.length); for(int idx=0;idx(terms2[idx], output)); } - final FST fst = new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(0, 0, false); + final FST fst = new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); assertNotNull(fst); assertEquals(24, fst.getNodeCount()); assertEquals(30, fst.getArcCount()); @@ -225,7 +225,7 @@ public class TestFSTs extends LuceneTestCase { for(IntsRef term : terms) { pairs.add(new FSTTester.InputOutput(term, NO_OUTPUT)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(); } // PositiveIntOutput (ord) @@ -235,47 +235,47 @@ public class TestFSTs extends LuceneTestCase { for(int idx=0;idx(terms[idx], (long) idx)); } - new FSTTester(random, dir, inputMode, pairs, outputs, true).doTest(); + new FSTTester(random(), dir, inputMode, pairs, outputs, true).doTest(); } // PositiveIntOutput (random monotonically increasing positive number) { - final boolean doShare = random.nextBoolean(); + final boolean doShare = random().nextBoolean(); final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(doShare); final List> pairs = new ArrayList>(terms.length); long lastOutput = 0; for(int idx=0;idx(terms[idx], value)); } - new FSTTester(random, dir, inputMode, pairs, outputs, doShare).doTest(); + new FSTTester(random(), dir, inputMode, pairs, outputs, doShare).doTest(); } // PositiveIntOutput (random positive number) { - final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean()); + final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random().nextBoolean()); final List> pairs = new ArrayList>(terms.length); for(int idx=0;idx(terms[idx], random.nextLong() & Long.MAX_VALUE)); + pairs.add(new FSTTester.InputOutput(terms[idx], random().nextLong() & Long.MAX_VALUE)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(); } // Pair { - final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(random.nextBoolean()); - final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(random.nextBoolean()); + final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(random().nextBoolean()); + final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(random().nextBoolean()); final PairOutputs outputs = new PairOutputs(o1, o2); final List>> pairs = new ArrayList>>(terms.length); long lastOutput = 0; for(int idx=0;idx>(terms[idx], outputs.newPair((long) idx, value))); } - new FSTTester>(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester>(random(), dir, inputMode, pairs, outputs, false).doTest(); } // Sequence-of-bytes @@ -284,10 +284,10 @@ public class TestFSTs extends LuceneTestCase { final BytesRef NO_OUTPUT = outputs.getNoOutput(); final List> pairs = new ArrayList>(terms.length); for(int 
idx=0;idx(terms[idx], output)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(); } // Sequence-of-ints @@ -303,7 +303,7 @@ public class TestFSTs extends LuceneTestCase { } pairs.add(new FSTTester.InputOutput(terms[idx], output)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(); } // Up to two positive ints, shared, generally but not @@ -317,15 +317,15 @@ public class TestFSTs extends LuceneTestCase { long lastOutput = 0; for(int idx=0;idx(terms[idx], output)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(); } } @@ -665,7 +665,7 @@ public class TestFSTs extends LuceneTestCase { if (random.nextBoolean()) { // seek to term that doesn't exist: while(true) { - final IntsRef term = toIntsRef(getRandomString(), inputMode); + final IntsRef term = toIntsRef(getRandomString(random), inputMode); int pos = Collections.binarySearch(pairs, new InputOutput(term, null)); if (pos < 0) { pos = -(pos+1); @@ -762,7 +762,7 @@ public class TestFSTs extends LuceneTestCase { } else if (upto != -1 && upto < 0.75 * pairs.size() && random.nextBoolean()) { int attempt = 0; for(;attempt<10;attempt++) { - IntsRef term = toIntsRef(getRandomString(), inputMode); + IntsRef term = toIntsRef(getRandomString(random), inputMode); if (!termsMap.containsKey(term) && term.compareTo(pairs.get(upto).input) > 0) { int pos = Collections.binarySearch(pairs, new InputOutput(term, null)); assert pos < 0; @@ -1034,6 +1034,7 @@ public class TestFSTs extends LuceneTestCase { } private void testRandomWords(int maxNumWords, int numIter) throws IOException { + Random random = new Random(random().nextLong()); for(int iter=0;iter termsSet = new HashSet(); IntsRef[] terms = new IntsRef[numWords]; while(termsSet.size() < numWords) { - final String term = getRandomString(); + final String term = getRandomString(random); termsSet.add(toIntsRef(term, inputMode)); } doTest(inputMode, termsSet.toArray(new IntsRef[termsSet.size()])); @@ -1051,7 +1052,7 @@ public class TestFSTs extends LuceneTestCase { } } - static String getRandomString() { + static String getRandomString(Random random) { final String term; if (random.nextBoolean()) { term = _TestUtil.randomRealisticUnicodeString(random); @@ -1066,7 +1067,7 @@ public class TestFSTs extends LuceneTestCase { @Nightly public void testBigSet() throws IOException { - testRandomWords(_TestUtil.nextInt(random, 50000, 60000), 1); + testRandomWords(_TestUtil.nextInt(random(), 50000, 60000), 1); } static String inputToString(int inputMode, IntsRef term) { @@ -1096,9 +1097,9 @@ public class TestFSTs extends LuceneTestCase { Codec.setDefault(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat())); } - final LineFileDocs docs = new LineFileDocs(random, defaultCodecSupportsDocValues()); + final LineFileDocs docs = new LineFileDocs(random(), defaultCodecSupportsDocValues()); final int RUN_TIME_MSEC = atLeast(500); - final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64); + final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64); final File tempDir = _TestUtil.getTempDir("fstlines"); final MockDirectoryWrapper dir = 
newFSDirectory(tempDir); final IndexWriter writer = new IndexWriter(dir, conf); @@ -1111,13 +1112,13 @@ public class TestFSTs extends LuceneTestCase { } IndexReader r = IndexReader.open(writer, true); writer.close(); - final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean()); + final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random().nextBoolean()); - final boolean doRewrite = random.nextBoolean(); + final boolean doRewrite = random().nextBoolean(); Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doRewrite); - boolean storeOrd = random.nextBoolean(); + boolean storeOrd = random().nextBoolean(); if (VERBOSE) { if (storeOrd) { System.out.println("FST stores ord"); @@ -1163,6 +1164,7 @@ public class TestFSTs extends LuceneTestCase { } if (ord > 0) { + final Random random = new Random(random().nextLong()); for(int rewriteIter=0;rewriteIter<2;rewriteIter++) { if (rewriteIter == 1) { if (doRewrite) { @@ -1177,7 +1179,7 @@ public class TestFSTs extends LuceneTestCase { final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst); int num = atLeast(1000); for(int iter=0;iter fst = builder.finish(); @@ -1885,7 +1887,7 @@ public class TestFSTs extends LuceneTestCase { public void testInternalFinalState() throws Exception { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true); - final boolean willRewrite = random.nextBoolean(); + final boolean willRewrite = random().nextBoolean(); final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, willRewrite); builder.add(Util.toIntsRef(new BytesRef("stat"), new IntsRef()), outputs.getNoOutput()); builder.add(Util.toIntsRef(new BytesRef("station"), new IntsRef()), outputs.getNoOutput()); @@ -2059,6 +2061,7 @@ public class TestFSTs extends LuceneTestCase { } public void testShortestPathsRandom() throws Exception { + final Random random = random(); int numWords = atLeast(1000); final TreeMap slowCompletor = new TreeMap(); @@ -2168,6 +2171,7 @@ public class TestFSTs extends LuceneTestCase { final Builder> builder = new Builder>(FST.INPUT_TYPE.BYTE1, outputs); final IntsRef scratch = new IntsRef(); + Random random = random(); for (int i = 0; i < numWords; i++) { String s; while (true) { diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java index 0122391a83a..51be9b6cf80 100644 --- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java @@ -123,49 +123,49 @@ public class TestReproduceMessage extends WithNestedTests { public void testAssumeBeforeClass() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.BEFORE_CLASS; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test @Ignore public void testAssumeInitializer() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.INITIALIZER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test public void testAssumeRule() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.RULE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertEquals("", runAndReturnSyserr()); } @Test 
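// (under the new runner an assumption failure no longer prints a "NOTE: Assume failed in" line to syserr, hence the empty-output assertions in these tests)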
public void testAssumeBefore() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.BEFORE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test public void testAssumeTest() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.TEST; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test public void testAssumeAfter() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.AFTER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test @Ignore public void testAssumeAfterClass() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.AFTER_CLASS; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } /* @@ -190,32 +190,44 @@ public class TestReproduceMessage extends WithNestedTests { public void testFailureRule() throws Exception { type = SoreType.FAILURE; where = SorePoint.RULE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + + final String syserr = runAndReturnSyserr(); + + super.prevSysOut.println(getSysErr() + "\n" + getSysOut()); + + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." + Nested.class.getSimpleName())); } @Test public void testFailureBefore() throws Exception { type = SoreType.FAILURE; where = SorePoint.BEFORE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." + Nested.class.getSimpleName())); } @Test public void testFailureTest() throws Exception { type = SoreType.FAILURE; where = SorePoint.TEST; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." + Nested.class.getSimpleName())); } @Test public void testFailureAfter() throws Exception { type = SoreType.FAILURE; where = SorePoint.AFTER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." 
+ Nested.class.getSimpleName())); } @Test @Ignore @@ -247,32 +259,40 @@ public class TestReproduceMessage extends WithNestedTests { public void testErrorRule() throws Exception { type = SoreType.ERROR; where = SorePoint.RULE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." + Nested.class.getSimpleName())); } @Test public void testErrorBefore() throws Exception { type = SoreType.ERROR; where = SorePoint.BEFORE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." + Nested.class.getSimpleName())); } @Test public void testErrorTest() throws Exception { type = SoreType.ERROR; where = SorePoint.TEST; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." + Nested.class.getSimpleName())); } @Test public void testErrorAfter() throws Exception { type = SoreType.ERROR; where = SorePoint.AFTER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.class=*." 
+ Nested.class.getSimpleName())); } @Test @Ignore @@ -286,8 +306,8 @@ public class TestReproduceMessage extends WithNestedTests { JUnitCore.runClasses(Nested.class); String err = getSysErr(); - //super.prevSysErr.println("Type: " + type + ", point: " + where + " resulted in:\n" + err); - //super.prevSysErr.println("---"); + // super.prevSysErr.println("Type: " + type + ", point: " + where + " resulted in:\n" + err); + // super.prevSysErr.println("---"); return err; } } diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSeedFromUncaught.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSeedFromUncaught.java index 911882f3e1d..a49cff79c4d 100644 --- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSeedFromUncaught.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSeedFromUncaught.java @@ -22,6 +22,7 @@ import org.junit.Assert; import org.junit.Test; import org.junit.runner.JUnitCore; import org.junit.runner.Result; +import org.junit.runner.notification.Failure; /** * Check that uncaught exceptions result in seed info being dumped to @@ -53,9 +54,9 @@ public class TestSeedFromUncaught extends WithNestedTests { public void testUncaughtDumpsSeed() { Result result = JUnitCore.runClasses(ThrowInUncaught.class); Assert.assertEquals(1, result.getFailureCount()); - String consoleOut = super.getSysErr() + "\n\n" + super.getSysOut(); - Assert.assertTrue(consoleOut.contains("-Dtests.seed=")); - Assert.assertTrue(consoleOut.contains("-Dtestmethod=testFoo")); - Assert.assertTrue(consoleOut.contains("foobar")); + Failure f = result.getFailures().get(0); + String trace = f.getTrace(); + Assert.assertTrue(trace.contains("SeedInfo.seed(")); + Assert.assertTrue(trace.contains("foobar")); } } diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java index 5b34754e051..6daa1c20735 100644 --- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java @@ -33,6 +33,7 @@ public class TestSetupTeardownChaining extends WithNestedTests { @Override public void setUp() throws Exception { // missing call. + System.out.println("Hello."); } @Test diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java index 145d945044d..af844ee9f82 100644 --- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java @@ -30,6 +30,8 @@ import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; +import com.carrotsearch.randomizedtesting.RandomizedRunner; + /** * An abstract test class that prepares nested test classes to run. * A nested test class will assume it's executed under control of this @@ -43,12 +45,11 @@ import org.junit.runners.model.Statement; * cause havoc (static fields). */ public abstract class WithNestedTests { - public static ThreadLocal runsAsNested = new ThreadLocal() { - @Override - protected Boolean initialValue() { - return false; - } - }; + /** + * This can no longer be thread local because {@link RandomizedRunner} runs + * suites in an isolated threadgroup/thread. 
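+ * A plain volatile field keeps the flag visible to that thread group as well as to the framework's own thread.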
+ */ + public static volatile boolean runsAsNested; public static abstract class AbstractNestedTest extends LuceneTestCase { @ClassRule @@ -65,7 +66,7 @@ public abstract class WithNestedTests { }; protected static boolean isRunningNested() { - return runsAsNested.get() != null && runsAsNested.get(); + return runsAsNested; } } @@ -96,13 +97,13 @@ public abstract class WithNestedTests { } } - runsAsNested.set(true); + runsAsNested = true; } @After public final void after() { - runsAsNested.set(false); - + runsAsNested = false; + if (suppressOutputStreams) { System.out.flush(); System.err.flush(); diff --git a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java index 2f7605d5d6e..c900e28c828 100644 --- a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java +++ b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java @@ -51,16 +51,16 @@ public class TestPackedInts extends LuceneTestCase { for (int iter = 0; iter < num; iter++) { long ceil = 2; for(int nbits=1;nbits<63;nbits++) { - final int valueCount = 100+random.nextInt(500); + final int valueCount = 100+random().nextInt(500); final Directory d = newDirectory(); - IndexOutput out = d.createOutput("out.bin", newIOContext(random)); + IndexOutput out = d.createOutput("out.bin", newIOContext(random())); PackedInts.Writer w = PackedInts.getWriter( out, valueCount, nbits); final long[] values = new long[valueCount]; for(int i=0;i + + + + diff --git a/lucene/test-framework/lib/junit4-ant-1.1.0.jar.sha1 b/lucene/test-framework/lib/junit4-ant-1.1.0.jar.sha1 new file mode 100755 index 00000000000..994e8fc5abd --- /dev/null +++ b/lucene/test-framework/lib/junit4-ant-1.1.0.jar.sha1 @@ -0,0 +1 @@ +773996a80119ea828613eaee11a9c303ede78a03 diff --git a/lucene/test-framework/lib/junit4-ant-LICENSE-ASL.txt b/lucene/test-framework/lib/junit4-ant-LICENSE-ASL.txt new file mode 100755 index 00000000000..7a4a3ea2424 --- /dev/null +++ b/lucene/test-framework/lib/junit4-ant-LICENSE-ASL.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/lucene/test-framework/lib/junit4-ant-NOTICE.txt b/lucene/test-framework/lib/junit4-ant-NOTICE.txt new file mode 100755 index 00000000000..3c321aa2516 --- /dev/null +++ b/lucene/test-framework/lib/junit4-ant-NOTICE.txt @@ -0,0 +1,12 @@ + +JUnit4, parallel JUnit execution for ANT +Copyright 2011-2012 Carrot Search s.c. +http://labs.carrotsearch.com/randomizedtesting.html + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ +This product includes asm (asmlib), BSD license +This product includes Google Guava, ASL license +This product includes simple-xml, ASL license +This product includes Google GSON, ASL license diff --git a/lucene/test-framework/lib/randomizedtesting-runner-1.1.0.jar.sha1 b/lucene/test-framework/lib/randomizedtesting-runner-1.1.0.jar.sha1 new file mode 100755 index 00000000000..d62c4a9c86a --- /dev/null +++ b/lucene/test-framework/lib/randomizedtesting-runner-1.1.0.jar.sha1 @@ -0,0 +1 @@ +32682aa5df3aa618bad5eb54a9b6d186a7956f9d diff --git a/lucene/test-framework/lib/randomizedtesting-runner-LICENSE-ASL.txt b/lucene/test-framework/lib/randomizedtesting-runner-LICENSE-ASL.txt new file mode 100755 index 00000000000..7a4a3ea2424 (verbatim copy of junit4-ant-LICENSE-ASL.txt above; identical blob 7a4a3ea2424)
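The jars added above are Carrot Search's randomizedtesting runner that the rest of this patch builds on. As a minimal sketch of the per-test Random discipline the test changes follow — assuming only the 1.1.x API shipped here, and with a hypothetical example class name:

import java.util.Random;

import org.junit.Test;
import org.junit.runner.RunWith;

import com.carrotsearch.randomizedtesting.RandomizedContext;
import com.carrotsearch.randomizedtesting.RandomizedRunner;

@RunWith(RandomizedRunner.class)
public class RandomSharingSketch {

  // What LuceneTestCase.random() resolves to after this patch: a Random
  // bound to the current test context and thread. It must not be cached
  // in a static field or handed directly to other threads.
  static Random random() {
    return RandomizedContext.current().getRandom();
  }

  @Test
  public void testForkBeforeSharing() throws Exception {
    // Fork a private child seeded from the context Random before another
    // thread uses it; the run stays reproducible from the master seed.
    final Random child = new Random(random().nextLong());
    Thread worker = new Thread() {
      @Override
      public void run() {
        child.nextInt(100); // safe: this thread exclusively owns 'child'
      }
    };
    worker.start();
    worker.join();
  }
}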
diff --git a/lucene/test-framework/lib/randomizedtesting-runner-NOTICE.txt b/lucene/test-framework/lib/randomizedtesting-runner-NOTICE.txt new file mode 100755 index 00000000000..e657788259e --- /dev/null +++ b/lucene/test-framework/lib/randomizedtesting-runner-NOTICE.txt @@ -0,0 +1,12 @@ + +RandomizedRunner, a JUnit @Runner for randomized tests (and more) +Copyright 2011-2012 Carrot Search s.c. +http://labs.carrotsearch.com/randomizedtesting.html + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product includes asm (asmlib), BSD license +This product includes Google Guava, ASL license +This product includes simple-xml, ASL license +This product includes Google GSON, ASL license diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java index 58d89156129..4bc3a094c74 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java @@ -189,7 +189,7 @@ public abstract class CollationTestBase extends LuceneTestCase { String dkResult) throws Exception { Directory indexStore = newDirectory(); IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); // document data: // the tracer field is used to determine which document was hit @@ -267,7 +267,7 @@ public abstract class CollationTestBase extends LuceneTestCase { public void assertThreadSafe(final Analyzer analyzer) throws Exception { int numTestPoints = 100; - int numThreads = _TestUtil.nextInt(random, 3, 5); + int numThreads = _TestUtil.nextInt(random(), 3, 5); final HashMap map = new HashMap(); // create a map up front. @@ -275,7 +275,7 @@ // and ensure they are the same as the ones we produced in serial fashion.
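// (random() returns the Random bound to the current test's randomized context, so this serial pass stays reproducible from the master seed)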
for (int i = 0; i < numTestPoints; i++) { - String term = _TestUtil.randomSimpleString(random); + String term = _TestUtil.randomSimpleString(random()); TokenStream ts = analyzer.tokenStream("fake", new StringReader(term)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); BytesRef bytes = termAtt.getBytesRef(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java index b1ab2597176..21df176f0dc 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java @@ -64,7 +64,8 @@ public final class MockAnalyzer extends Analyzer { */ public MockAnalyzer(Random random, CharacterRunAutomaton runAutomaton, boolean lowerCase, CharacterRunAutomaton filter, boolean enablePositionIncrements) { super(new PerFieldReuseStrategy()); - this.random = random; + // TODO: this should be solved in a different way; Random should not be shared (!). + this.random = new Random(random.nextLong()); this.runAutomaton = runAutomaton; this.lowerCase = lowerCase; this.filter = filter; diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java index 9ab49583242..be3bf89e807 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java @@ -62,10 +62,8 @@ public class RandomIndexWriter implements Closeable { public MockIndexWriter(Random r, Directory dir, IndexWriterConfig conf) throws IOException { super(dir, conf); - // must make a private random since our methods are - // called from different threads; else test failures may - // not be reproducible from the original seed - this.r = new Random(r.nextInt()); + // TODO: this should be solved in a different way; Random should not be shared (!). + this.r = new Random(r.nextLong()); } @Override @@ -93,7 +91,8 @@ public class RandomIndexWriter implements Closeable { /** create a RandomIndexWriter with the provided config */ public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException { - this.r = r; + // TODO: this should be solved in a different way; Random should not be shared (!). 
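+ // forking a child off r.nextLong() keeps this writer reproducible from the master seed without sharing the parent Random across threads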
+ this.r = new Random(r.nextLong()); w = new MockIndexWriter(r, dir, c); flushAt = _TestUtil.nextInt(r, 10, 1000); codec = w.getConfig().getCodec(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java index 042afa7a04f..164b204cf5b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java @@ -19,11 +19,7 @@ package org.apache.lucene.index; import java.io.File; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -138,16 +134,16 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas // Occasional longish pause if running // nightly - if (LuceneTestCase.TEST_NIGHTLY && random.nextInt(6) == 3) { + if (LuceneTestCase.TEST_NIGHTLY && random().nextInt(6) == 3) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": now long sleep"); } - Thread.sleep(_TestUtil.nextInt(random, 50, 500)); + Thread.sleep(_TestUtil.nextInt(random(), 50, 500)); } // Rate limit ingest rate: - if (random.nextInt(7) == 5) { - Thread.sleep(_TestUtil.nextInt(random, 1, 10)); + if (random().nextInt(7) == 5) { + Thread.sleep(_TestUtil.nextInt(random(), 1, 10)); if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": done sleep"); } @@ -160,21 +156,21 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas // Maybe add randomly named field final String addedField; - if (random.nextBoolean()) { - addedField = "extra" + random.nextInt(40); + if (random().nextBoolean()) { + addedField = "extra" + random().nextInt(40); doc.add(newField(addedField, "a random field", TextField.TYPE_STORED)); } else { addedField = null; } - if (random.nextBoolean()) { + if (random().nextBoolean()) { - if (random.nextBoolean()) { + if (random().nextBoolean()) { // Add/update doc block: final String packID; final SubDocs delSubDocs; - if (toDeleteSubDocs.size() > 0 && random.nextBoolean()) { - delSubDocs = toDeleteSubDocs.get(random.nextInt(toDeleteSubDocs.size())); + if (toDeleteSubDocs.size() > 0 && random().nextBoolean()) { + delSubDocs = toDeleteSubDocs.get(random().nextInt(toDeleteSubDocs.size())); assert !delSubDocs.deleted; toDeleteSubDocs.remove(delSubDocs); // Update doc block, replacing prior packID @@ -195,7 +191,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas docsList.add(_TestUtil.cloneDocument(doc)); docIDs.add(doc.get("docid")); - final int maxDocCount = _TestUtil.nextInt(random, 1, 10); + final int maxDocCount = _TestUtil.nextInt(random(), 1, 10); while(docsList.size() < maxDocCount) { doc = docs.nextDoc(); if (doc == null) { @@ -224,7 +220,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } doc.removeField("packID"); - if (random.nextInt(5) == 2) { + if (random().nextInt(5) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": buffer del id:" + packID); } @@ -240,7 +236,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas addDocument(new Term("docid", docid), 
doc); addCount.getAndIncrement(); - if (random.nextInt(5) == 3) { + if (random().nextInt(5) == 3) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid")); } @@ -259,7 +255,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas updateDocument(new Term("docid", docid), doc); addCount.getAndIncrement(); - if (random.nextInt(5) == 3) { + if (random().nextInt(5) == 3) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid")); } @@ -267,7 +263,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } } - if (random.nextInt(30) == 17) { + if (random().nextInt(30) == 17) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": apply " + toDeleteIDs.size() + " deletes"); } @@ -322,7 +318,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } protected void runSearchThreads(final long stopTimeMS) throws Exception { - final int numThreads = _TestUtil.nextInt(random, 1, 5); + final int numThreads = _TestUtil.nextInt(random(), 1, 5); final Thread[] searchThreads = new Thread[numThreads]; final AtomicInteger totHits = new AtomicInteger(); @@ -357,7 +353,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas trigger = 1; } else { trigger = totTermCount.get()/30; - shift = random.nextInt(trigger); + shift = random().nextInt(trigger); } while (System.currentTimeMillis() < stopTimeMS) { BytesRef term = termsEnum.next(); @@ -418,12 +414,13 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas final long t0 = System.currentTimeMillis(); + Random random = new Random(random().nextLong()); final LineFileDocs docs = new LineFileDocs(random, defaultCodecSupportsDocValues()); final File tempDir = _TestUtil.getTempDir(testName); dir = newFSDirectory(tempDir); ((MockDirectoryWrapper) dir).setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves. - final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). - setInfoStream(new FailOnNonBulkMergesInfoStream()); + final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, + new MockAnalyzer(random())).setInfoStream(new FailOnNonBulkMergesInfoStream()); if (LuceneTestCase.TEST_NIGHTLY) { // newIWConfig makes smallish max seg size, which @@ -468,11 +465,11 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas writer = new IndexWriter(dir, conf); _TestUtil.reduceOpenFiles(writer); - final ExecutorService es = random.nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory(testName)); + final ExecutorService es = random().nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory(testName)); doAfterWriter(es); - final int NUM_INDEX_THREADS = _TestUtil.nextInt(random, 2, 4); + final int NUM_INDEX_THREADS = _TestUtil.nextInt(random(), 2, 4); final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 
300 : RANDOM_MULTIPLIER; diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java index 1c9145ff92b..9d798e6f1fd 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java @@ -18,6 +18,7 @@ package org.apache.lucene.search; */ import java.util.BitSet; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; @@ -54,6 +55,7 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { + Random random = random(); directory = newDirectory(); stopword = "" + randomChar(); CharacterRunAutomaton stopset = new CharacterRunAutomaton(BasicAutomata.makeString(stopword)); @@ -109,7 +111,7 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { static String randomFieldContents() { // TODO: zipf-like distribution StringBuilder sb = new StringBuilder(); - int numTerms = random.nextInt(15); + int numTerms = random().nextInt(15); for (int i = 0; i < numTerms; i++) { if (sb.length() > 0) { sb.append(' '); // whitespace @@ -123,9 +125,9 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { * returns random character (a-z) */ static char randomChar() { - return (char) _TestUtil.nextInt(random, 'a', 'z'); + return (char) _TestUtil.nextInt(random(), 'a', 'z'); } - + /** * returns a term suitable for searching. * terms are single characters in lowercase (a-z) @@ -170,9 +172,9 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { */ protected void assertSubsetOf(Query q1, Query q2, Filter filter) throws Exception { // TRUNK ONLY: test both filter code paths - if (filter != null && random.nextBoolean()) { - final boolean q1RandomAccess = random.nextBoolean(); - final boolean q2RandomAccess = random.nextBoolean(); + if (filter != null && random().nextBoolean()) { + final boolean q1RandomAccess = random().nextBoolean(); + final boolean q2RandomAccess = random().nextBoolean(); q1 = new FilteredQuery(q1, filter) { @Override protected boolean useRandomAccess(Bits bits, int firstFilterDoc) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java index 7ac387e37d1..21d6b982cec 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java @@ -518,29 +518,29 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { @Override public void run() { try { - final LineFileDocs docs = new LineFileDocs(random, defaultCodecSupportsDocValues()); + final LineFileDocs docs = new LineFileDocs(random(), defaultCodecSupportsDocValues()); int numDocs = 0; while (System.nanoTime() < endTimeNanos) { - final int what = random.nextInt(3); - final NodeState node = nodes[random.nextInt(nodes.length)]; + final int what = random().nextInt(3); + final NodeState node = nodes[random().nextInt(nodes.length)]; if (numDocs == 0 || what == 0) { node.writer.addDocument(docs.nextDoc()); numDocs++; } else if (what == 1) { - node.writer.updateDocument(new Term("docid", ""+random.nextInt(numDocs)), + node.writer.updateDocument(new 
Term("docid", ""+random().nextInt(numDocs)), docs.nextDoc()); numDocs++; } else { - node.writer.deleteDocuments(new Term("docid", ""+random.nextInt(numDocs))); + node.writer.deleteDocuments(new Term("docid", ""+random().nextInt(numDocs))); } // TODO: doc blocks too - if (random.nextInt(17) == 12) { + if (random().nextInt(17) == 12) { node.writer.commit(); } - if (random.nextInt(17) == 12) { - nodes[random.nextInt(nodes.length)].reopen(); + if (random().nextInt(17) == 12) { + nodes[random().nextInt(nodes.length)].reopen(); } } } catch (Throwable t) { @@ -563,7 +563,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { nodes = new NodeState[numNodes]; for(int nodeID=0;nodeID getTestMethods(Class suiteClass, List> methods) { + // We will return all methods starting with test* and rely on further validation to weed + // out static or otherwise invalid test methods. + List copy = mutableCopy1(flatten(methods)); + Iterator i =copy.iterator(); + while (i.hasNext()) { + Method m= i.next(); + if (!m.getName().startsWith("test") || + !Modifier.isPublic(m.getModifiers()) || + Modifier.isStatic(m.getModifiers()) || + m.getParameterTypes().length != 0) { + i.remove(); + } + } + return copy; + } +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitDividingSelector.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitDividingSelector.java deleted file mode 100644 index 5a9509c5a82..00000000000 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitDividingSelector.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.lucene.util; -import java.io.File; - -import org.apache.tools.ant.BuildException; -import org.apache.tools.ant.types.Parameter; -import org.apache.tools.ant.types.selectors.BaseExtendSelector; - -/** Divides filesets into equal groups */ -public class LuceneJUnitDividingSelector extends BaseExtendSelector { - private int counter; - /** Number of total parts to split. */ - private int divisor; - /** Current part to accept. 
*/ - private int part; - - @Override - public void setParameters(Parameter[] pParameters) { - super.setParameters(pParameters); - for (int j = 0; j < pParameters.length; j++) { - Parameter p = pParameters[j]; - if ("divisor".equalsIgnoreCase(p.getName())) { - divisor = Integer.parseInt(p.getValue()); - } - else if ("part".equalsIgnoreCase(p.getName())) { - part = Integer.parseInt(p.getValue()); - } - else { - throw new BuildException("unknown " + p.getName()); - } - } - } - - @Override - public void verifySettings() { - super.verifySettings(); - if (divisor <= 0 || part <= 0) { - throw new BuildException("part or divisor not set"); - } - if (part > divisor) { - throw new BuildException("part must be <= divisor"); - } - } - - @Override - public boolean isSelected(File dir, String name, File path) { - counter = counter % divisor + 1; - return counter == part; - } -} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitResultFormatter.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitResultFormatter.java deleted file mode 100644 index c67b9bc4c66..00000000000 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitResultFormatter.java +++ /dev/null @@ -1,294 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.lucene.util; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.OutputStream; -import java.text.NumberFormat; -import java.util.logging.LogManager; - -import junit.framework.AssertionFailedError; -import junit.framework.Test; - -import org.apache.lucene.store.LockReleaseFailedException; -import org.apache.lucene.store.NativeFSLockFactory; -import org.apache.tools.ant.taskdefs.optional.junit.JUnitResultFormatter; -import org.apache.tools.ant.taskdefs.optional.junit.JUnitTest; -import org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner; -import org.apache.tools.ant.util.FileUtils; -import org.apache.tools.ant.util.StringUtils; -import org.junit.Ignore; - -/** - * Just like BriefJUnitResultFormatter "brief" bundled with ant, - * except all formatted text is buffered until the test suite is finished. - * At this point, the output is written at once in synchronized fashion. - * This way tests can run in parallel without interleaving output. - */ -public class LuceneJUnitResultFormatter implements JUnitResultFormatter { - private static final double ONE_SECOND = 1000.0; - - private static final NativeFSLockFactory lockFactory; - - /** Where to write the log to. */ - private OutputStream out; - - /** Formatter for timings. 
*/ - private NumberFormat numberFormat = NumberFormat.getInstance(); - - /** Output suite has written to System.out */ - private String systemOutput = null; - - /** Output suite has written to System.err */ - private String systemError = null; - - /** Buffer output until the end of the test */ - private ByteArrayOutputStream sb; // use a BOS for our mostly ascii-output - - private static final org.apache.lucene.store.Lock lock; - - static { - File lockDir = new File( - System.getProperty("tests.lockdir", System.getProperty("java.io.tmpdir")), - "lucene_junit_lock"); - lockDir.mkdirs(); - if (!lockDir.exists()) { - throw new RuntimeException("Could not make Lock directory:" + lockDir); - } - try { - lockFactory = new NativeFSLockFactory(lockDir); - lock = lockFactory.makeLock("junit_lock"); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - /** Constructor for LuceneJUnitResultFormatter. */ - public LuceneJUnitResultFormatter() { - } - - /** - * Sets the stream the formatter is supposed to write its results to. - * @param out the output stream to write to - */ - public void setOutput(OutputStream out) { - this.out = out; - } - - /** - * @see JUnitResultFormatter#setSystemOutput(String) - */ - /** {@inheritDoc}. */ - public void setSystemOutput(String out) { - systemOutput = out; - } - - /** - * @see JUnitResultFormatter#setSystemError(String) - */ - /** {@inheritDoc}. */ - public void setSystemError(String err) { - systemError = err; - } - - - /** - * The whole testsuite started. - * @param suite the test suite - */ - public synchronized void startTestSuite(JUnitTest suite) { - if (out == null) { - return; // Quick return - no output do nothing. - } - sb = new ByteArrayOutputStream(); // don't reuse, so its gc'ed - try { - LogManager.getLogManager().readConfiguration(); - } catch (Exception e) {} - append("Testsuite: "); - append(suite.getName()); - append(StringUtils.LINE_SEP); - } - - /** - * The whole testsuite ended. 
- * @param suite the test suite - */ - public synchronized void endTestSuite(JUnitTest suite) { - append("Tests run: "); - append(suite.runCount()); - append(", Failures: "); - append(suite.failureCount()); - append(", Errors: "); - append(suite.errorCount()); - append(", Time elapsed: "); - append(numberFormat.format(suite.getRunTime() / ONE_SECOND)); - append(" sec"); - append(StringUtils.LINE_SEP); - append(StringUtils.LINE_SEP); - - // append the err and output streams to the log - if (systemOutput != null && systemOutput.length() > 0) { - append("------------- Standard Output ---------------") - .append(StringUtils.LINE_SEP) - .append(systemOutput) - .append("------------- ---------------- ---------------") - .append(StringUtils.LINE_SEP); - } - - // HACK: junit gives us no way to do this in LuceneTestCase - try { - Class clazz = Class.forName(suite.getName()); - Ignore ignore = clazz.getAnnotation(Ignore.class); - if (ignore != null) { - if (systemError == null) systemError = ""; - systemError += "NOTE: Ignoring test class '" + clazz.getSimpleName() + "': " - + ignore.value() + StringUtils.LINE_SEP; - } - } catch (ClassNotFoundException e) { /* no problem */ } - // END HACK - - if (systemError != null && systemError.length() > 0) { - append("------------- Standard Error -----------------") - .append(StringUtils.LINE_SEP) - .append(systemError) - .append("------------- ---------------- ---------------") - .append(StringUtils.LINE_SEP); - } - - if (out != null) { - try { - lock.obtain(5000); - try { - sb.writeTo(out); - out.flush(); - } finally { - try { - lock.release(); - } catch(LockReleaseFailedException e) { - // well lets pretend its released anyway - } - } - } catch (IOException e) { - throw new RuntimeException("unable to write results", e); - } finally { - if (out != System.out && out != System.err) { - FileUtils.close(out); - } - } - } - } - - /** - * A test started. - * @param test a test - */ - public void startTest(Test test) { - } - - /** - * A test ended. - * @param test a test - */ - public void endTest(Test test) { - } - - /** - * Interface TestListener for JUnit <= 3.4. - * - *

A Test failed. - * @param test a test - * @param t the exception thrown by the test - */ - public void addFailure(Test test, Throwable t) { - formatError("\tFAILED", test, t); - } - - /** - * Interface TestListener for JUnit > 3.4. - * - *

A Test failed. - * @param test a test - * @param t the assertion failed by the test - */ - public void addFailure(Test test, AssertionFailedError t) { - addFailure(test, (Throwable) t); - } - - /** - * A test caused an error. - * @param test a test - * @param error the error thrown by the test - */ - public void addError(Test test, Throwable error) { - formatError("\tCaused an ERROR", test, error); - } - - /** - * Format the test for printing.. - * @param test a test - * @return the formatted testname - */ - protected String formatTest(Test test) { - if (test == null) { - return "Null Test: "; - } else { - return "Testcase: " + test.toString() + ":"; - } - } - - /** - * Format an error and print it. - * @param type the type of error - * @param test the test that failed - * @param error the exception that the test threw - */ - protected synchronized void formatError(String type, Test test, - Throwable error) { - if (test != null) { - endTest(test); - } - - append(formatTest(test) + type); - append(StringUtils.LINE_SEP); - append(error.getMessage()); - append(StringUtils.LINE_SEP); - String strace = JUnitTestRunner.getFilteredTrace(error); - append(strace); - append(StringUtils.LINE_SEP); - append(StringUtils.LINE_SEP); - } - - public LuceneJUnitResultFormatter append(String s) { - if (s == null) - s = "(null)"; - try { - sb.write(s.getBytes()); // intentionally use default charset, its a console. - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - public LuceneJUnitResultFormatter append(long l) { - return append(Long.toString(l)); - } -} - diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java index 7489777085a..d5facc20e29 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java @@ -63,12 +63,12 @@ import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.MockRandomMergePolicy; +import org.apache.lucene.index.ParallelAtomicReader; +import org.apache.lucene.index.ParallelCompositeReader; import org.apache.lucene.index.RandomCodec; import org.apache.lucene.index.RandomDocumentsWriterPerThreadPool; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.SerialMergeScheduler; -import org.apache.lucene.index.ParallelAtomicReader; -import org.apache.lucene.index.ParallelCompositeReader; import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.index.ThreadAffinityDocumentsWriterThreadPool; import org.apache.lucene.index.TieredMergePolicy; @@ -98,15 +98,22 @@ import org.junit.Assume; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Ignore; import org.junit.Rule; import org.junit.internal.AssumptionViolatedException; -import org.junit.rules.*; -import org.junit.runner.*; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runner.RunWith; +import org.junit.runner.Runner; import org.junit.runner.notification.RunListener; import org.junit.runners.model.MultipleFailureException; import org.junit.runners.model.Statement; +import com.carrotsearch.randomizedtesting.JUnit4MethodProvider; +import com.carrotsearch.randomizedtesting.RandomizedContext; +import 
com.carrotsearch.randomizedtesting.RandomizedRunner; +import com.carrotsearch.randomizedtesting.annotations.*; + /** * Base class for all Lucene unit tests, Junit3 or Junit4 variant. *

@@ -133,15 +140,23 @@ import org.junit.runners.model.Statement; * if you annotate your derived class correctly with the annotations above * @see #assertSaneFieldCaches(String) */ - -@RunWith(LuceneTestCaseRunner.class) +@TestMethodProviders({ + LuceneJUnit3MethodProvider.class, + JUnit4MethodProvider.class +}) +@Validators({ + RequireAssertions.class, + NoStaticHooksShadowing.class +}) +@RunWith(RandomizedRunner.class) +@ThreadLeaks(failTestIfLeaking = false) public abstract class LuceneTestCase extends Assert { /** * true iff tests are run in verbose mode. Note: if it is false, tests are not * expected to print any messages. */ public static final boolean VERBOSE = Boolean.getBoolean("tests.verbose"); - + public static final boolean INFOSTREAM = Boolean.parseBoolean(System.getProperty("tests.infostream", Boolean.toString(VERBOSE))); /** Use this constant when creating Analyzers and any other version-dependent stuff. @@ -149,23 +164,16 @@ public abstract class LuceneTestCase extends Assert { */ public static final Version TEST_VERSION_CURRENT = Version.LUCENE_40; - /** - * If this is set, it is the only method that should run. - */ - static final String TEST_METHOD; - /** Create indexes in this directory, optimally use a subdir, named after the test */ public static final File TEMP_DIR; static { - String method = System.getProperty("testmethod", "").trim(); - TEST_METHOD = method.length() == 0 ? null : method; String s = System.getProperty("tempDir", System.getProperty("java.io.tmpdir")); if (s == null) throw new RuntimeException("To run tests, you need to define system property 'tempDir' or 'java.io.tmpdir'."); TEMP_DIR = new File(s); TEMP_DIR.mkdirs(); } - + /** set of directories we created, in afterclass we try to clean these up */ private static final Map tempDirs = Collections.synchronizedMap(new HashMap()); @@ -186,14 +194,8 @@ public abstract class LuceneTestCase extends Assert { public static final int TEST_ITER = Integer.parseInt(System.getProperty("tests.iter", "1")); /** Get the minimum number of times to run tests until a failure happens */ public static final int TEST_ITER_MIN = Integer.parseInt(System.getProperty("tests.iter.min", Integer.toString(TEST_ITER))); - /** Get the random seed for tests */ - public static final String TEST_SEED = System.getProperty("tests.seed", "random"); /** whether or not @nightly tests should run */ public static final boolean TEST_NIGHTLY = Boolean.parseBoolean(System.getProperty("tests.nightly", "false")); - /** whether or not @weekly tests should run */ - public static final boolean TEST_WEEKLY = Boolean.parseBoolean(System.getProperty("tests.weekly", "false")); - /** whether or not @slow tests should run */ - public static final boolean TEST_SLOW = Boolean.parseBoolean(System.getProperty("tests.slow", "false")); /** the line file used by LineFileDocs */ public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", "europarl.lines.txt.gz"); /** whether or not to clean threads between test invocations: "false", "perMethod", "perClass" */ @@ -255,11 +257,28 @@ public abstract class LuceneTestCase extends Assert { private static List testClassesRun = new ArrayList(); - private static void initRandom() { - assert !random.initialized; - staticSeed = "random".equals(TEST_SEED) ? seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l1; - random.setSeed(staticSeed); - random.initialized = true; + /** + * Access to the current {@link RandomizedContext}'s Random instance. 
It is safe to use + * this method from multiple threads, etc., but it should be called while within a runner's + * scope (so no static initializers). The returned {@link Random} instance will be + * different when this method is called inside a {@link BeforeClass} hook (static + * suite scope) and within {@link Before}/ {@link After} hooks or test methods. + * + *

The returned instance must not be shared with other threads or cross a single scope's + * boundary. For example, a {@link Random} acquired within a test method shouldn't be reused + * for another test case. + * + *

There is an overhead connected with getting the {@link Random} for a particular context + * and thread. It is better to cache the {@link Random} locally if tight loops with multiple + * invocations are present or create a derivative local {@link Random} for millions of calls + * like this: + *

+   * Random random = random();
+   * // tight loop with many invocations.
+   * </pre>
+ */ + public static Random random() { + return RandomizedContext.current().getRandom(); } @Deprecated @@ -312,10 +331,9 @@ public abstract class LuceneTestCase extends Assert { .around(new SubclassSetupTeardownRule()); @BeforeClass - public static void beforeClassLuceneTestCaseJ4() { + static void beforeClassLuceneTestCaseJ4() { testClassesRun.add(getTestClass().getSimpleName()); - initRandom(); tempDirs.clear(); stores = Collections.synchronizedMap(new IdentityHashMap()); @@ -365,22 +383,29 @@ public abstract class LuceneTestCase extends Assert { System.out.println("Loaded postingsFormat: '" + postingsFormat + "': " + PostingsFormat.forName(postingsFormat).getClass().getName()); } } - + savedInfoStream = InfoStream.getDefault(); + final boolean v = random().nextBoolean(); if (INFOSTREAM) { - // consume random for consistency - random.nextBoolean(); InfoStream.setDefault(new PrintStreamInfoStream(System.out)); } else { - if (random.nextBoolean()) { + if (v) { InfoStream.setDefault(new NullInfoStream()); } } + Class targetClass = RandomizedContext.current().getTargetClass(); + LuceneTestCase.useNoMemoryExpensiveCodec = + targetClass.isAnnotationPresent(UseNoMemoryExpensiveCodec.class); + if (useNoMemoryExpensiveCodec) { + System.err.println("NOTE: Using no memory expensive codecs (Memory, SimpleText) for " + + targetClass.getSimpleName() + "."); + } + PREFLEX_IMPERSONATION_IS_ACTIVE = false; savedCodec = Codec.getDefault(); final Codec codec; - int randomVal = random.nextInt(10); + int randomVal = random().nextInt(10); if ("Lucene3x".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal < 2)) { // preflex-only setup codec = Codec.forName("Lucene3x"); @@ -393,7 +418,7 @@ public abstract class LuceneTestCase extends Assert { } else if (!"random".equals(TEST_CODEC)) { codec = Codec.forName(TEST_CODEC); } else if ("random".equals(TEST_POSTINGSFORMAT)) { - codec = new RandomCodec(random, useNoMemoryExpensiveCodec); + codec = new RandomCodec(random(), useNoMemoryExpensiveCodec); } else { codec = new Lucene40Codec() { private final PostingsFormat format = PostingsFormat.forName(TEST_POSTINGSFORMAT); @@ -428,20 +453,20 @@ public abstract class LuceneTestCase extends Assert { } // END hack - locale = TEST_LOCALE.equals("random") ? randomLocale(random) : localeForName(TEST_LOCALE); + locale = TEST_LOCALE.equals("random") ? randomLocale(random()) : localeForName(TEST_LOCALE); Locale.setDefault(locale); // TimeZone.getDefault will set user.timezone to the default timezone of the user's locale. // So store the original property value and restore it at end. restoreProperties.put("user.timezone", System.getProperty("user.timezone")); savedTimeZone = TimeZone.getDefault(); - timeZone = TEST_TIMEZONE.equals("random") ? randomTimeZone(random) : TimeZone.getTimeZone(TEST_TIMEZONE); + timeZone = TEST_TIMEZONE.equals("random") ? randomTimeZone(random()) : TimeZone.getTimeZone(TEST_TIMEZONE); TimeZone.setDefault(timeZone); - similarity = random.nextBoolean() ? new DefaultSimilarity() : new RandomSimilarityProvider(random); + similarity = random().nextBoolean() ? new DefaultSimilarity() : new RandomSimilarityProvider(random()); testsFailed = false; } @AfterClass - public static void afterClassLuceneTestCaseJ4() { + static void afterClassLuceneTestCaseJ4() { for (Map.Entry e : restoreProperties.entrySet()) { if (e.getValue() == null) { System.clearProperty(e.getKey()); @@ -452,8 +477,8 @@ public abstract class LuceneTestCase extends Assert { restoreProperties.clear(); Throwable problem = null; - - if (! 
"false".equals(TEST_CLEAN_THREADS)) { + + if (!"false".equals(TEST_CLEAN_THREADS)) { int rogueThreads = threadCleanup("test class"); if (rogueThreads > 0) { // TODO: fail here once the leaks are fixed. @@ -502,16 +527,12 @@ public abstract class LuceneTestCase extends Assert { if (VERBOSE || testsFailed || problem != null) { printDebuggingInformation(codecDescription); } - - // reset seed - random.setSeed(0L); - random.initialized = false; - + if (problem != null) { throw new RuntimeException(problem); } } - + /** print some useful debugging information about the environment */ private static void printDebuggingInformation(String codecDescription) { System.err.println("NOTE: test params are: codec=" + codecDescription + @@ -580,6 +601,7 @@ public abstract class LuceneTestCase extends Assert { * Control the outcome of each test's output status (failure, assumption-failure). This * would ideally be handled by attaching a {@link RunListener} to a {@link Runner} (because * then we would be notified about static block failures). + * TODO: make this a test listener. */ private class TestResultInterceptorRule implements TestRule { @Override @@ -589,9 +611,8 @@ public abstract class LuceneTestCase extends Assert { public void evaluate() throws Throwable { try { base.evaluate(); - } catch (AssumptionViolatedException e) { - assumptionIgnored(e, description); - throw e; + } catch (AssumptionViolatedException t) { + throw t; } catch (Throwable t) { failed(t, description); throw t; @@ -600,17 +621,6 @@ public abstract class LuceneTestCase extends Assert { }; } - private void assumptionIgnored(AssumptionViolatedException e, Description description) { - System.err.print("NOTE: Assume failed in '" + description.getDisplayName() + "' (ignored):"); - if (VERBOSE) { - System.err.println(); - e.printStackTrace(System.err); - } else { - System.err.print(" "); - System.err.println(e.getMessage()); - } - } - private void failed(Throwable e, Description description) { testsFailed = true; reportAdditionalFailureInfo(); @@ -677,33 +687,27 @@ public abstract class LuceneTestCase extends Assert { } catch (Throwable t) { errors.add(t); } - + MultipleFailureException.assertEmpty(errors); } }; } } - + /** * Setup before the tests. */ private final void setUpInternal() throws Exception { - seed = "random".equals(TEST_SEED) ? seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l2; - random.setSeed(seed); - Thread.currentThread().setName("LTC-main#seed=" + - new ThreeLongs(staticSeed, seed, LuceneTestCaseRunner.runnerSeed)); + RandomizedContext.current().getRunnerSeedAsString()); savedBoolMaxClauseCount = BooleanQuery.getMaxClauseCount(); if (useNoMemoryExpensiveCodec) { String defFormat = _TestUtil.getPostingsFormat("thisCodeMakesAbsolutelyNoSenseCanWeDeleteIt"); - // Stupid: assumeFalse in setUp() does not print any information, because - // TestWatchman does not watch test during setUp() - getName() is also not defined... 
- // => print info directly and use assume without message: if ("SimpleText".equals(defFormat) || "Memory".equals(defFormat)) { - System.err.println("NOTE: A test method in " + getClass().getSimpleName() + " was ignored, as it uses too much memory with " + defFormat + "."); - Assume.assumeTrue(false); + assumeTrue("NOTE: A test method in " + getClass().getSimpleName() + + " was ignored, as it uses too much memory with " + defFormat + ".", false); } } } @@ -954,7 +958,7 @@ public abstract class LuceneTestCase extends Assert { } public static int atLeast(int i) { - return atLeast(random, i); + return atLeast(random(), i); } /** @@ -971,7 +975,7 @@ public abstract class LuceneTestCase extends Assert { } public static boolean rarely() { - return rarely(random); + return rarely(random()); } public static boolean usually(Random random) { @@ -979,7 +983,7 @@ public abstract class LuceneTestCase extends Assert { } public static boolean usually() { - return usually(random); + return usually(random()); } public static void assumeTrue(String msg, boolean b) { @@ -1031,7 +1035,7 @@ public abstract class LuceneTestCase extends Assert { /** create a new index writer config with random defaults */ public static IndexWriterConfig newIndexWriterConfig(Version v, Analyzer a) { - return newIndexWriterConfig(random, v, a); + return newIndexWriterConfig(random(), v, a); } /** create a new index writer config with random defaults using the specified random */ @@ -1085,11 +1089,11 @@ public abstract class LuceneTestCase extends Assert { } public static LogMergePolicy newLogMergePolicy() { - return newLogMergePolicy(random); + return newLogMergePolicy(random()); } public static TieredMergePolicy newTieredMergePolicy() { - return newTieredMergePolicy(random); + return newTieredMergePolicy(random()); } public static LogMergePolicy newLogMergePolicy(Random r) { @@ -1161,7 +1165,7 @@ public abstract class LuceneTestCase extends Assert { * overwritten. */ public static MockDirectoryWrapper newDirectory() throws IOException { - return newDirectory(random); + return newDirectory(random()); } /** @@ -1185,7 +1189,7 @@ public abstract class LuceneTestCase extends Assert { * information. */ public static MockDirectoryWrapper newDirectory(Directory d) throws IOException { - return newDirectory(random, d); + return newDirectory(random(), d); } /** Returns a new FSDirectory instance over the given file, which must be a folder. 
*/ @@ -1197,7 +1201,7 @@ public abstract class LuceneTestCase extends Assert { public static MockDirectoryWrapper newFSDirectory(File f, LockFactory lf) throws IOException { String fsdirClass = TEST_DIRECTORY; if (fsdirClass.equals("random")) { - fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)]; + fsdirClass = FS_DIRECTORIES[random().nextInt(FS_DIRECTORIES.length)]; } Class clazz; @@ -1206,12 +1210,13 @@ public abstract class LuceneTestCase extends Assert { clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass); } catch (ClassCastException e) { // TEST_DIRECTORY is not a sub-class of FSDirectory, so draw one at random - fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)]; + fsdirClass = FS_DIRECTORIES[random().nextInt(FS_DIRECTORIES.length)]; clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass); } Directory fsdir = newFSDirectoryImpl(clazz, f); - MockDirectoryWrapper dir = new MockDirectoryWrapper(random, maybeNRTWrap(random, fsdir)); + MockDirectoryWrapper dir = new MockDirectoryWrapper( + random(), maybeNRTWrap(random(), fsdir)); if (lf != null) { dir.setLockFactory(lf); } @@ -1248,7 +1253,7 @@ public abstract class LuceneTestCase extends Assert { } public static Field newField(String name, String value, FieldType type) { - return newField(random, name, value, type); + return newField(random(), name, value, type); } public static Field newField(Random random, String name, String value, FieldType type) { @@ -1375,6 +1380,7 @@ public abstract class LuceneTestCase extends Assert { /** Sometimes wrap the IndexReader as slow, parallel or filter reader (or combinations of that) */ public static IndexReader maybeWrapReader(IndexReader r) throws IOException { + Random random = random(); if (rarely()) { // TODO: remove this, and fix those tests to wrap before putting slow around: final boolean wasOriginallyAtomic = r instanceof AtomicReader; @@ -1439,6 +1445,7 @@ public abstract class LuceneTestCase extends Assert { * with one that returns null for getSequentialSubReaders. */ public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException { + Random random = random(); if (usually()) { if (maybeWrap) { r = maybeWrapReader(r); @@ -1477,7 +1484,7 @@ public abstract class LuceneTestCase extends Assert { return ret; } } - + static void shutdownExecutorService(ExecutorService ex) { if (ex != null) { ex.shutdown(); @@ -1508,21 +1515,22 @@ public abstract class LuceneTestCase extends Assert { // We get here from InterceptTestCaseEvents on the 'failed' event.... public static void reportPartialFailureInfo() { - System.err.println("NOTE: reproduce with (hopefully): ant test -Dtestcase=" + testClassesRun.get(testClassesRun.size()-1) - + " -Dtests.seed=" + new ThreeLongs(staticSeed, 0L, LuceneTestCaseRunner.runnerSeed) - + reproduceWithExtraParams()); + System.err.println("NOTE: reproduce with (hopefully): ant test " + + "-Dtests.class=*." + getTestClass().getSimpleName() + + " -Dtests.seed=" + RandomizedContext.current().getRunnerSeedAsString() + + reproduceWithExtraParams()); } - + // We get here from InterceptTestCaseEvents on the 'failed' event.... 
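Both reproduce-line builders in this hunk now print the new-style filtering options instead of the old -Dtestcase/-Dtestmethod pair, so a reported failure is re-run along these lines (the class name, method name, and seed here are placeholders):

    ant test -Dtests.class=*.TestDemo -Dtests.method=testSketch -Dtests.seed=DEADBEEF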
public void reportAdditionalFailureInfo() { StringBuilder b = new StringBuilder(); - b.append("NOTE: reproduce with: ant test -Dtestcase=") - .append(getClass().getSimpleName()); + b.append("NOTE: reproduce with: ant test ") + .append("-Dtests.class=*.").append(getTestClass().getSimpleName()); if (getName() != null) { - b.append(" -Dtestmethod=").append(getName()); + b.append(" -Dtests.method=").append(getName()); } b.append(" -Dtests.seed=") - .append(new ThreeLongs(staticSeed, seed, LuceneTestCaseRunner.runnerSeed)) + .append(RandomizedContext.current().getRunnerSeedAsString()) .append(reproduceWithExtraParams()); System.err.println(b.toString()); } @@ -1578,14 +1586,6 @@ public abstract class LuceneTestCase extends Assert { // initialized by the TestRunner static boolean useNoMemoryExpensiveCodec; - - // recorded seed: for beforeClass - private static long staticSeed; - // seed for individual test methods, changed in @before - private long seed; - - static final Random seedRand = new Random(); - protected static final SmartRandom random = new SmartRandom(0); private String name = ""; @@ -1595,6 +1595,7 @@ public abstract class LuceneTestCase extends Assert { @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) + @TestGroup(enabled = false, sysProperty = "tests.nightly") public @interface Nightly {} /** @@ -1603,14 +1604,28 @@ public abstract class LuceneTestCase extends Assert { @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) + @TestGroup(enabled = false, sysProperty = "tests.weekly") public @interface Weekly{} + /** + * Annotation for tests which exhibit a known issue and are temporarily disabled. + */ + @Documented + @Inherited + @Retention(RetentionPolicy.RUNTIME) + @TestGroup(enabled = false, sysProperty = "tests.awaitsfix") + public @interface AwaitsFix { + /** Point to JIRA entry. */ + public String bugUrl(); + } + /** * Annotation for tests that are slow and should be run only when specifically asked to run */ @Documented @Inherited @Retention(RetentionPolicy.RUNTIME) + @TestGroup(enabled = false, sysProperty = "tests.slow") public @interface Slow{} /** @@ -1622,9 +1637,6 @@ public abstract class LuceneTestCase extends Assert { @Target(ElementType.TYPE) public @interface UseNoMemoryExpensiveCodec {} - @Ignore("just a hack") - public final void alwaysIgnoredTestMethod() {} - protected static boolean defaultCodecSupportsDocValues() { return !Codec.getDefault().getName().equals("Lucene3x"); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCaseRunner.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCaseRunner.java deleted file mode 100644 index a0663b216e9..00000000000 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCaseRunner.java +++ /dev/null @@ -1,206 +0,0 @@ -package org.apache.lucene.util; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.lang.annotation.Annotation; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.Random; - -import org.apache.lucene.util.LuceneTestCase.Nightly; -import org.apache.lucene.util.LuceneTestCase.Weekly; -import org.apache.lucene.util.LuceneTestCase.Slow; -import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.Description; -import org.junit.runner.manipulation.Filter; -import org.junit.runner.manipulation.NoTestsRemainException; -import org.junit.runner.notification.Failure; -import org.junit.runner.notification.RunListener; -import org.junit.runner.notification.RunNotifier; -import org.junit.runners.BlockJUnit4ClassRunner; -import org.junit.runners.model.FrameworkMethod; -import org.junit.runners.model.InitializationError; - -// please don't reorganize these into a wildcard! -import static org.apache.lucene.util.LuceneTestCase.TEST_ITER; -import static org.apache.lucene.util.LuceneTestCase.TEST_ITER_MIN; -import static org.apache.lucene.util.LuceneTestCase.TEST_METHOD; -import static org.apache.lucene.util.LuceneTestCase.TEST_SEED; -import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; -import static org.apache.lucene.util.LuceneTestCase.TEST_WEEKLY; -import static org.apache.lucene.util.LuceneTestCase.TEST_SLOW; -import static org.apache.lucene.util.LuceneTestCase.VERBOSE; - - -/** optionally filters the tests to be run by TEST_METHOD */ -public class LuceneTestCaseRunner extends BlockJUnit4ClassRunner { - private List testMethods; - static final long runnerSeed; - static { - runnerSeed = "random".equals(TEST_SEED) ? LuceneTestCase.seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l3; - } - - @Override - protected List computeTestMethods() { - if (testMethods != null) - return testMethods; - - Random r = new Random(runnerSeed); - - testMethods = new ArrayList(); - for (Method m : getTestClass().getJavaClass().getMethods()) { - // check if the current test's class has methods annotated with @Ignore - final Ignore ignored = m.getAnnotation(Ignore.class); - if (ignored != null && !m.getName().equals("alwaysIgnoredTestMethod")) { - System.err.println("NOTE: Ignoring test method '" + m.getName() + "': " + ignored.value()); - } - // add methods starting with "test" - final int mod = m.getModifiers(); - if (m.getAnnotation(Test.class) != null || - (m.getName().startsWith("test") && - !Modifier.isAbstract(mod) && - m.getParameterTypes().length == 0 && - m.getReturnType() == Void.TYPE)) - { - if (Modifier.isStatic(mod)) - throw new RuntimeException("Test methods must not be static."); - testMethods.add(new FrameworkMethod(m)); - } - } - - if (testMethods.isEmpty()) { - throw new RuntimeException("No runnable methods!"); - } - - if (TEST_NIGHTLY == false) { - removeAnnotatedTests(Nightly.class, "@nightly"); - } - if (TEST_WEEKLY == false) { - removeAnnotatedTests(Weekly.class, "@weekly"); - } - if (TEST_SLOW == false) { - removeAnnotatedTests(Slow.class, "@slow"); - } - // sort the test methods first before shuffling them, so that the shuffle is consistent - // across different implementations that might order the methods different originally. 
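The sort just below this comment is what made the deleted runner's shuffle reproducible: Class.getMethods() returns methods in a JVM-dependent order, so the runner first sorted by name to get a stable baseline and only then shuffled with the runner seed. The same idea as a standalone sketch (names are illustrative, not part of the patch):

    import java.lang.reflect.Method;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;
    import java.util.Random;

    class DeterministicShuffleSketch {
      static List<Method> order(Class<?> suite, long runnerSeed) {
        List<Method> methods = new ArrayList<Method>(Arrays.asList(suite.getMethods()));
        // Stable baseline first: reflection order is not guaranteed across JVMs.
        Collections.sort(methods, new Comparator<Method>() {
          public int compare(Method a, Method b) {
            return a.getName().compareTo(b.getName());
          }
        });
        // Seeded shuffle: the same runnerSeed always yields the same order.
        Collections.shuffle(methods, new Random(runnerSeed));
        return methods;
      }
    }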
- Collections.sort(testMethods, new Comparator() { - @Override - public int compare(FrameworkMethod f1, FrameworkMethod f2) { - return f1.getName().compareTo(f2.getName()); - } - }); - Collections.shuffle(testMethods, r); - return testMethods; - } - - private void removeAnnotatedTests(Class annotation, String userFriendlyName) { - if (getTestClass().getJavaClass().isAnnotationPresent(annotation)) { - /* the test class is annotated with the annotation, remove all methods */ - String className = getTestClass().getJavaClass().getSimpleName(); - System.err.println("NOTE: Ignoring " + userFriendlyName + " test class '" + className + "'"); - testMethods.clear(); - } else { - /* remove all methods with the annotation*/ - for (int i = 0; i < testMethods.size(); i++) { - final FrameworkMethod m = testMethods.get(i); - if (m.getAnnotation(annotation) != null) { - System.err.println("NOTE: Ignoring " + userFriendlyName + " test method '" + m.getName() + "'"); - testMethods.remove(i--); - } - } - } - /* dodge a possible "no-runnable methods" exception by adding a fake ignored test */ - if (testMethods.isEmpty()) { - try { - testMethods.add(new FrameworkMethod(LuceneTestCase.class.getMethod("alwaysIgnoredTestMethod"))); - } catch (Exception e) { throw new RuntimeException(e); } - } - } - - @Override - protected void runChild(FrameworkMethod arg0, RunNotifier arg1) { - if (VERBOSE) { - System.out.println("\nNOTE: running test " + arg0.getName()); - } - - // only print iteration info if the user requested more than one iterations - final boolean verbose = VERBOSE && TEST_ITER > 1; - - final int currentIter[] = new int[1]; - arg1.addListener(new RunListener() { - @Override - public void testFailure(Failure failure) throws Exception { - if (verbose) { - System.out.println("\nNOTE: iteration " + currentIter[0] + " failed! "); - } - } - }); - for (int i = 0; i < TEST_ITER; i++) { - currentIter[0] = i; - if (verbose) { - System.out.println("\nNOTE: running iter=" + (1+i) + " of " + TEST_ITER); - } - super.runChild(arg0, arg1); - if (LuceneTestCase.testsFailed) { - if (i >= TEST_ITER_MIN - 1) { // XXX is this still off-by-one? 
- break; - } - } - } - } - - public LuceneTestCaseRunner(Class clazz) throws InitializationError { - super(clazz); - - // This TestRunner can handle only LuceneTestCase subclasses - if (!LuceneTestCase.class.isAssignableFrom(clazz)) { - throw new UnsupportedOperationException("LuceneTestCaseRunner can only be used with LuceneTestCase."); - } - - final boolean useNoMemoryExpensiveCodec = LuceneTestCase.useNoMemoryExpensiveCodec = - clazz.isAnnotationPresent(UseNoMemoryExpensiveCodec.class); - if (useNoMemoryExpensiveCodec) { - System.err.println("NOTE: Using no memory expensive codecs (Memory, SimpleText) for " + - clazz.getSimpleName() + "."); - } - - // evil we cannot init our random here, because super() calls computeTestMethods!!!!; - Filter f = new Filter() { - - @Override - public String describe() { return "filters according to TEST_METHOD"; } - - @Override - public boolean shouldRun(Description d) { - return TEST_METHOD == null || d.getMethodName().equals(TEST_METHOD); - } - }; - - try { - f.apply(this); - } catch (NoTestsRemainException e) { - throw new RuntimeException(e); - } - } -} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/NoStaticHooksShadowing.java b/lucene/test-framework/src/java/org/apache/lucene/util/NoStaticHooksShadowing.java new file mode 100755 index 00000000000..d2f8eb5f7fc --- /dev/null +++ b/lucene/test-framework/src/java/org/apache/lucene/util/NoStaticHooksShadowing.java @@ -0,0 +1,58 @@ +package org.apache.lucene.util; + +import static com.carrotsearch.randomizedtesting.MethodCollector.allDeclaredMethods; +import static com.carrotsearch.randomizedtesting.MethodCollector.annotatedWith; +import static com.carrotsearch.randomizedtesting.MethodCollector.flatten; +import static com.carrotsearch.randomizedtesting.MethodCollector.removeShadowed; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import com.carrotsearch.randomizedtesting.ClassValidator; + +public class NoStaticHooksShadowing implements ClassValidator { + @Override + public void validate(Class clazz) throws Throwable { + List> all = allDeclaredMethods(clazz); + + checkNoShadows(clazz, all, BeforeClass.class); + checkNoShadows(clazz, all, AfterClass.class); + } + + private void checkNoShadows(Class clazz, List> all, Class ann) { + List> methodHierarchy = annotatedWith(all, ann); + List> noShadows = removeShadowed(methodHierarchy); + if (!noShadows.equals(methodHierarchy)) { + Set shadowed = new HashSet(flatten(methodHierarchy)); + shadowed.removeAll(flatten(noShadows)); + + StringBuilder b = new StringBuilder(); + for (Method m : shadowed) { + String sig = signature(m); + for (Method other : flatten(methodHierarchy)) { + if (other != m && sig.equals(signature(other))) { + b.append("Method: " + m.toString() + + "#" + sig + " possibly shadowed by " + + other.toString() + "#" + signature(other) + "\n"); + } + } + } + + throw new RuntimeException("There are shadowed methods annotated with " + + ann.getName() + ". 
These methods would not be executed by JUnit and need to manually chain themselves which can lead to" + + " maintenance problems.\n" + b.toString().trim()); + } + } + + private String signature(Method m) { + return m.getName() + Arrays.toString(m.getParameterTypes()); + } +} + diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RandomNoSetSeed.java b/lucene/test-framework/src/java/org/apache/lucene/util/RandomNoSetSeed.java new file mode 100755 index 00000000000..ab338d1c286 --- /dev/null +++ b/lucene/test-framework/src/java/org/apache/lucene/util/RandomNoSetSeed.java @@ -0,0 +1,123 @@ +package org.apache.lucene.util; + +import java.util.Random; + +/** + * A random with a delegate, preventing calls to {@link Random#setSeed(long)} and + * permitting end-of-lifecycle markers. + */ +@SuppressWarnings("serial") +final class RandomNoSetSeed extends Random { + private final Random delegate; + + /** + * If false, the object is dead. Any calls to any method will result + * in an exception. + */ + private volatile boolean alive = true; + + void setDead() { + alive = false; + } + + public RandomNoSetSeed(Random delegate) { + super(0); + this.delegate = delegate; + } + + @Override + protected int next(int bits) { + throw new RuntimeException("Shouldn't be reachable."); + } + + @Override + public boolean nextBoolean() { + checkAlive(); + return delegate.nextBoolean(); + } + + @Override + public void nextBytes(byte[] bytes) { + checkAlive(); + delegate.nextBytes(bytes); + } + + @Override + public double nextDouble() { + checkAlive(); + return delegate.nextDouble(); + } + + @Override + public float nextFloat() { + checkAlive(); + return delegate.nextFloat(); + } + + @Override + public double nextGaussian() { + checkAlive(); + return delegate.nextGaussian(); + } + + @Override + public int nextInt() { + checkAlive(); + return delegate.nextInt(); + } + + @Override + public int nextInt(int n) { + checkAlive(); + return delegate.nextInt(n); + } + + @Override + public long nextLong() { + checkAlive(); + return delegate.nextLong(); + } + + @Override + public void setSeed(long seed) { + // This is an interesting case of observing uninitialized object from an instance method + // (this method is called from the superclass constructor). We allow it. + if (seed == 0 && delegate == null) { + return; + } + + throw new RuntimeException( + RandomNoSetSeed.class.getSimpleName() + + " prevents changing the seed of its random generators to assure repeatability" + + " of tests. If you need a mutable instance of Random, create a new instance," + + " preferably with the initial seed aquired from this Random instance."); + } + + @Override + public String toString() { + checkAlive(); + return delegate.toString(); + } + + @Override + public boolean equals(Object obj) { + checkAlive(); + return delegate.equals(obj); + } + + @Override + public int hashCode() { + checkAlive(); + return delegate.hashCode(); + } + + /** + * Check the liveness status. + */ + private void checkAlive() { + if (!alive) { + throw new RuntimeException("This Random is dead. 
Do not store references to " + + "Random instances, acquire an instance when you need one."); + } + } +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RequireAssertions.java b/lucene/test-framework/src/java/org/apache/lucene/util/RequireAssertions.java new file mode 100755 index 00000000000..6cce0499b82 --- /dev/null +++ b/lucene/test-framework/src/java/org/apache/lucene/util/RequireAssertions.java @@ -0,0 +1,18 @@ +package org.apache.lucene.util; + +import com.carrotsearch.randomizedtesting.ClassValidator; + +/** + * Require assertions for Lucene/Solr packages. + */ +public class RequireAssertions implements ClassValidator { + @Override + public void validate(Class clazz) throws Throwable { + try { + assert false; + throw new RuntimeException("Enable assertions globally (-ea) or for Solr/Lucene subpackages only."); + } catch (AssertionError e) { + // Ok, enabled. + } + } +} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/SmartRandom.java b/lucene/test-framework/src/java/org/apache/lucene/util/SmartRandom.java deleted file mode 100644 index 8e92ba24f09..00000000000 --- a/lucene/test-framework/src/java/org/apache/lucene/util/SmartRandom.java +++ /dev/null @@ -1,43 +0,0 @@ -package org.apache.lucene.util; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.util.Random; - -/** - * A random that tracks if its been initialized properly, - * and throws an exception if it hasn't. - */ -public class SmartRandom extends Random { - boolean initialized; - - SmartRandom(long seed) { - super(seed); - } - - @Override - protected int next(int bits) { - if (!initialized) { - System.err.println("!!! WARNING: test is using random from static initializer !!!"); - Thread.dumpStack(); - // I wish, but it causes JRE crashes - // throw new IllegalStateException("you cannot use this random from a static initializer in your test"); - } - return super.next(bits); - } -} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/ThreeLongs.java b/lucene/test-framework/src/java/org/apache/lucene/util/ThreeLongs.java deleted file mode 100644 index 89113419433..00000000000 --- a/lucene/test-framework/src/java/org/apache/lucene/util/ThreeLongs.java +++ /dev/null @@ -1,46 +0,0 @@ -package org.apache.lucene.util; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** helper class for a random seed that is really 3 random seeds: - *
    - *
  1. The test class's random seed: this is what the test sees in its beforeClass methods - *
  2. The test method's random seed: this is what the test method sees starting in its befores - *
  3. The test runner's random seed (controls the shuffling of test methods) - *
- */ -class ThreeLongs { - public final long l1, l2, l3; - - public ThreeLongs(long l1, long l2, long l3) { - this.l1 = l1; - this.l2 = l2; - this.l3 = l3; - } - - @Override - public String toString() { - return Long.toString(l1, 16) + ":" + Long.toString(l2, 16) + ":" + Long.toString(l3, 16); - } - - public static ThreeLongs fromString(String s) { - String parts[] = s.split(":"); - assert parts.length == 3; - return new ThreeLongs(Long.parseLong(parts[0], 16), Long.parseLong(parts[1], 16), Long.parseLong(parts[2], 16)); - } -} diff --git a/lucene/tools/junit4/cached-timehints.txt b/lucene/tools/junit4/cached-timehints.txt new file mode 100755 index 00000000000..2a7e584bae4 --- /dev/null +++ b/lucene/tools/junit4/cached-timehints.txt @@ -0,0 +1,1052 @@ +org.apache.lucene.TestAssertions=5,4,3,4,4,65 +org.apache.lucene.TestDemo=72,9,9,16,8,16 +org.apache.lucene.TestExternalCodecs=299,136,69,126,994,1102 +org.apache.lucene.TestMergeSchedulerExternal=278,539,590,305,451,338 +org.apache.lucene.TestSearch=22,44,71,17,32,140 +org.apache.lucene.TestSearchForDuplicates=113,98,55,115,126,30 +org.apache.lucene.analysis.TestCachingTokenFilter=7,32,36,9,8,12 +org.apache.lucene.analysis.TestGraphTokenizers=16571 +org.apache.lucene.analysis.TestLookaheadTokenFilter=15138 +org.apache.lucene.analysis.TestMockAnalyzer=1698,2243,5217,1473,734,1862 +org.apache.lucene.analysis.TestMockCharFilter=5,4,4,5,6,10 +org.apache.lucene.analysis.TestNumericTokenStream=15,4,6,7,6,57 +org.apache.lucene.analysis.TestToken=3168,1751,1631,1751,2500,1526 +org.apache.lucene.analysis.ar.TestArabicAnalyzer=861,315,404,472,455,650 +org.apache.lucene.analysis.ar.TestArabicLetterTokenizer=6,6,6,7,4,35 +org.apache.lucene.analysis.ar.TestArabicNormalizationFilter=22,23,37,21,16,75 +org.apache.lucene.analysis.ar.TestArabicStemFilter=39,41,36,102,26,105 +org.apache.lucene.analysis.bg.TestBulgarianAnalyzer=650,495,676,367,855,1127 +org.apache.lucene.analysis.bg.TestBulgarianStemmer=24,17,19,33,21,30 +org.apache.lucene.analysis.br.TestBrazilianStemmer=1001,1049,988,999,1070,1993 +org.apache.lucene.analysis.ca.TestCatalanAnalyzer=706,685,584,549,958,2201 +org.apache.lucene.analysis.charfilter.HTMLStripCharFilterTest=2695,3133,3300,3025,3182,3919 +org.apache.lucene.analysis.charfilter.TestCharFilter=7,7,7,8,7,35 +org.apache.lucene.analysis.charfilter.TestMappingCharFilter=570,743,803,793,1080,31058 +org.apache.lucene.analysis.cjk.TestCJKAnalyzer=3532,3604,4338,4222,1182,4228 +org.apache.lucene.analysis.cjk.TestCJKTokenizer=2089,1496,1885,2154,718,835 +org.apache.lucene.analysis.cjk.TestCJKWidthFilter=457,274,281,236,388,1934 +org.apache.lucene.analysis.cn.TestChineseTokenizer=758,533,984,508,891,875 +org.apache.lucene.analysis.cn.smart.TestSmartChineseAnalyzer=2786,2596,2856,2701,2922,5185 +org.apache.lucene.analysis.commongrams.CommonGramsFilterTest=1368,1167,1111,1160,1655,3648 +org.apache.lucene.analysis.compound.TestCompoundWordTokenFilter=3824,3623,3616,3465,4362,3489 +org.apache.lucene.analysis.core.TestAnalyzers=1354,723,1030,1309,1028,10765 +org.apache.lucene.analysis.core.TestClassicAnalyzer=585,642,663,495,494,4536 +org.apache.lucene.analysis.core.TestDuelingAnalyzers=842,877,836,701,882,13901 +org.apache.lucene.analysis.core.TestKeywordAnalyzer=835,1197,1526,872,884,1065 +org.apache.lucene.analysis.core.TestRandomChains=21010 +org.apache.lucene.analysis.core.TestStandardAnalyzer=2581,2360,1844,2154,719,7580 +org.apache.lucene.analysis.core.TestStopAnalyzer=269,74,93,121,8,20 
+org.apache.lucene.analysis.core.TestStopFilter=10,12,10,13,8,30
+org.apache.lucene.analysis.core.TestTypeTokenFilter=6,7,25,6,19,10
+org.apache.lucene.analysis.core.TestUAX29URLEmailAnalyzer=2755
+org.apache.lucene.analysis.core.TestUAX29URLEmailTokenizer=788,437,726,745,694,4569
+org.apache.lucene.analysis.cz.TestCzechAnalyzer=434,376,556,537,542,2365
+org.apache.lucene.analysis.cz.TestCzechStemmer=22,19,15,17,18,65
+org.apache.lucene.analysis.da.TestDanishAnalyzer=3705,3360,4310,2629,669,1038
+org.apache.lucene.analysis.de.TestGermanAnalyzer=742,653,578,594,455,1300
+org.apache.lucene.analysis.de.TestGermanLightStemFilter=387,433,686,613,4243,895
+org.apache.lucene.analysis.de.TestGermanMinimalStemFilter=355,651,748,619,4435,1035
+org.apache.lucene.analysis.de.TestGermanNormalizationFilter=216,449,507,273,207,1213
+org.apache.lucene.analysis.de.TestGermanStemFilter=3040,2483,2514,2397,4258,1200
+org.apache.lucene.analysis.el.GreekAnalyzerTest=933,990,971,862,1175,4872
+org.apache.lucene.analysis.el.TestGreekStemmer=30,59,36,43,48,50
+org.apache.lucene.analysis.en.TestEnglishAnalyzer=1166,762,1395,1258,2629,1815
+org.apache.lucene.analysis.en.TestEnglishMinimalStemFilter=337,397,243,348,270,665
+org.apache.lucene.analysis.en.TestKStemmer=928,1149,1222,816,1857,1885
+org.apache.lucene.analysis.en.TestPorterStemFilter=375,325,463,289,434,600
+org.apache.lucene.analysis.es.TestSpanishAnalyzer=357,431,434,386,1573,4871
+org.apache.lucene.analysis.es.TestSpanishLightStemFilter=326,451,277,329,658,1315
+org.apache.lucene.analysis.eu.TestBasqueAnalyzer=585,425,1382,823,681,985
+org.apache.lucene.analysis.fa.TestPersianAnalyzer=513,670,485,314,637,5303
+org.apache.lucene.analysis.fa.TestPersianNormalizationFilter=11,13,13,11,15,25
+org.apache.lucene.analysis.fi.TestFinnishAnalyzer=792,900,685,939,603,1267
+org.apache.lucene.analysis.fi.TestFinnishLightStemFilter=500,839,613,438,630,1415
+org.apache.lucene.analysis.fr.TestElision=4,5,5,4,6,15
+org.apache.lucene.analysis.fr.TestFrenchAnalyzer=668,1074,489,479,1104,1275
+org.apache.lucene.analysis.fr.TestFrenchLightStemFilter=643,418,493,555,209,840
+org.apache.lucene.analysis.fr.TestFrenchMinimalStemFilter=3714,2525,4491,2535,597,1721
+org.apache.lucene.analysis.ga.TestIrishAnalyzer=995
+org.apache.lucene.analysis.ga.TestIrishLowerCaseFilter=10
+org.apache.lucene.analysis.gl.TestGalicianAnalyzer=918,958,1136,960,3116,1260
+org.apache.lucene.analysis.gl.TestGalicianMinimalStemFilter=673,706,640,369,342,735
+org.apache.lucene.analysis.gl.TestGalicianStemFilter=438,322,578,285,172,462
+org.apache.lucene.analysis.hi.TestHindiAnalyzer=726,636,654,612,1081,1278
+org.apache.lucene.analysis.hi.TestHindiNormalizer=10,10,12,11,8,35
+org.apache.lucene.analysis.hi.TestHindiStemmer=11,9,10,10,8,20
+org.apache.lucene.analysis.hu.TestHungarianAnalyzer=742,767,715,697,791,1280
+org.apache.lucene.analysis.hu.TestHungarianLightStemFilter=572,686,989,735,636,447
+org.apache.lucene.analysis.hunspell.HunspellDictionaryTest=12,12,12,14,42,12
+org.apache.lucene.analysis.hunspell.HunspellStemFilterTest=1327,1212,1648,1306,1881,1265
+org.apache.lucene.analysis.hunspell.HunspellStemmerTest=48,19,18,14,22,140
+org.apache.lucene.analysis.hy.TestArmenianAnalyzer=1002,487,903,580,1429,965
+org.apache.lucene.analysis.icu.TestICUFoldingFilter=2056,2573,1483,2241,3760,4343
+org.apache.lucene.analysis.icu.TestICUNormalizer2Filter=2187,2335,1831,1592,2768,4570
+org.apache.lucene.analysis.icu.TestICUTransformFilter=2678,4159,3243,3388,5000,4689
+org.apache.lucene.analysis.icu.segmentation.TestCharArrayIterator=26,53,21,19,17,82
+org.apache.lucene.analysis.icu.segmentation.TestICUTokenizer=2399,3110,2330,2314,4708,5943
+org.apache.lucene.analysis.icu.segmentation.TestLaoBreakIterator=79,36,111,148,11,380
+org.apache.lucene.analysis.icu.segmentation.TestWithCJKBigramFilter=673,707,760,667,35,175
+org.apache.lucene.analysis.id.TestIndonesianAnalyzer=421,491,665,536,621,1515
+org.apache.lucene.analysis.id.TestIndonesianStemmer=13,12,13,14,259,313
+org.apache.lucene.analysis.in.TestIndicNormalizer=14,14,14,9,12,30
+org.apache.lucene.analysis.it.TestItalianAnalyzer=1176,1091,1033,1204,1064,729
+org.apache.lucene.analysis.it.TestItalianLightStemFilter=427,268,449,493,720,924
+org.apache.lucene.analysis.ja.TestExtendedMode=11051
+org.apache.lucene.analysis.ja.TestJapaneseAnalyzer=2932
+org.apache.lucene.analysis.ja.TestJapaneseBaseFormFilter=5867
+org.apache.lucene.analysis.ja.TestJapaneseKatakanaStemFilter=4892
+org.apache.lucene.analysis.ja.TestJapaneseReadingFormFilter=2902
+org.apache.lucene.analysis.ja.TestJapaneseTokenizer=21035
+org.apache.lucene.analysis.ja.TestSearchMode=585
+org.apache.lucene.analysis.ja.dict.TestTokenInfoDictionary=1147
+org.apache.lucene.analysis.ja.dict.UserDictionaryTest=50
+org.apache.lucene.analysis.ja.util.TestToStringUtil=11
+org.apache.lucene.analysis.kuromoji.SegmenterTest=319,248,268,310,489
+org.apache.lucene.analysis.kuromoji.TestExtendedMode=1506,1174,1537,1780,2333
+org.apache.lucene.analysis.kuromoji.TestKuromojiAnalyzer=3668,3597,3703,3410,4044
+org.apache.lucene.analysis.kuromoji.TestKuromojiBaseFormFilter=2908,3299,3001,2800,3975
+org.apache.lucene.analysis.kuromoji.TestKuromojiTokenizer=4494,4640,4435,4159,4637
+org.apache.lucene.analysis.kuromoji.TestSearchMode=494,580,501,520,102
+org.apache.lucene.analysis.kuromoji.dict.TestTokenInfoDictionary=1406,1319,1249,1180,1187
+org.apache.lucene.analysis.kuromoji.dict.UserDictionaryTest=276,258,257,232,43
+org.apache.lucene.analysis.kuromoji.util.TestToStringUtil=17,14,9,24,11
+org.apache.lucene.analysis.lv.TestLatvianAnalyzer=655,605,760,451,757,1147
+org.apache.lucene.analysis.lv.TestLatvianStemmer=35,20,29,26,14,45
+org.apache.lucene.analysis.miscellaneous.PatternAnalyzerTest=882,936,1032,832,948,1515
+org.apache.lucene.analysis.miscellaneous.TestASCIIFoldingFilter=416,454,442,385,399,975
+org.apache.lucene.analysis.miscellaneous.TestCapitalizationFilter=294,479,376,593,806,1020
+org.apache.lucene.analysis.miscellaneous.TestEmptyTokenStream=5,4,6,3,3,20
+org.apache.lucene.analysis.miscellaneous.TestHyphenatedWordsFilter=813,427,434,750,406,1540
+org.apache.lucene.analysis.miscellaneous.TestKeepWordFilter=227,209,158,208,234,560
+org.apache.lucene.analysis.miscellaneous.TestKeywordMarkerFilter=7,6,5,5,248,128
+org.apache.lucene.analysis.miscellaneous.TestLengthFilter=21,4,5,6,5,60
+org.apache.lucene.analysis.miscellaneous.TestLimitTokenCountAnalyzer=129,152,173,190,89,516
+org.apache.lucene.analysis.miscellaneous.TestPerFieldAnalzyerWrapper=7,7,6,26,5,28
+org.apache.lucene.analysis.miscellaneous.TestPrefixAndSuffixAwareTokenFilter=18,39,26,12,7,10
+org.apache.lucene.analysis.miscellaneous.TestPrefixAwareTokenFilter=7,8,5,8,7,5
+org.apache.lucene.analysis.miscellaneous.TestRemoveDuplicatesTokenFilter=1223,1192,1311,1130,3032,7199
+org.apache.lucene.analysis.miscellaneous.TestSingleTokenTokenFilter=5,4,20,4,2,60
+org.apache.lucene.analysis.miscellaneous.TestStemmerOverrideFilter=4,4,4,4,3,5
+org.apache.lucene.analysis.miscellaneous.TestTrimFilter=572,423,380,498,858,3434
+org.apache.lucene.analysis.miscellaneous.TestWordDelimiterFilter=3187,2709,3407,2942,3216,5573
+org.apache.lucene.analysis.morfologik.TestMorfologikAnalyzer=1490,1559,1676,2357,1993,1974
+org.apache.lucene.analysis.ngram.EdgeNGramTokenFilterTest=3660,4136,3892,2591,1716,3785
+org.apache.lucene.analysis.ngram.EdgeNGramTokenizerTest=1927,2088,2489,1295,1208,5075
+org.apache.lucene.analysis.ngram.NGramTokenFilterTest=5941,4022,5224,5217,8889,4869
+org.apache.lucene.analysis.ngram.NGramTokenizerTest=12649,13480,11157,11566,13624,32608
+org.apache.lucene.analysis.nl.TestDutchStemmer=1493,1169,1590,1109,705,1045
+org.apache.lucene.analysis.no.TestNorwegianAnalyzer=686,594,827,894,791,1154
+org.apache.lucene.analysis.no.TestNorwegianLightStemFilter=605
+org.apache.lucene.analysis.no.TestNorwegianMinimalStemFilter=944
+org.apache.lucene.analysis.path.TestPathHierarchyTokenizer=674,631,649,565,468,1720
+org.apache.lucene.analysis.path.TestReversePathHierarchyTokenizer=488,376,567,332,481,2100
+org.apache.lucene.analysis.pattern.TestPatternReplaceCharFilter=1486,1425,1646,1598,3624,2207
+org.apache.lucene.analysis.pattern.TestPatternReplaceFilter=1159,1067,824,1219,1089,2137
+org.apache.lucene.analysis.pattern.TestPatternTokenizer=943,1426,1328,1555,656,2295
+org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterTest=55,14,16,19,10,15
+org.apache.lucene.analysis.payloads.NumericPayloadTokenFilterTest=5,7,4,5,6,15
+org.apache.lucene.analysis.payloads.TokenOffsetPayloadTokenFilterTest=6,4,7,5,5,10
+org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilterTest=46,28,33,35,8,10
+org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilterTest=1586,1861,1666,1821,1946,2345
+org.apache.lucene.analysis.phonetic.TestBeiderMorseFilter=952,792,1084,945,1038,1133
+org.apache.lucene.analysis.phonetic.TestPhoneticFilter=2858,2913,3046,2568,3197,4524
+org.apache.lucene.analysis.pl.TestPolishAnalyzer=1618,1484,1492,1569,1677,1607
+org.apache.lucene.analysis.position.PositionFilterTest=17,10,13,14,9,30
+org.apache.lucene.analysis.pt.TestPortugueseAnalyzer=497,415,382,407,504,955
+org.apache.lucene.analysis.pt.TestPortugueseLightStemFilter=585,664,657,584,484,1735
+org.apache.lucene.analysis.pt.TestPortugueseMinimalStemFilter=525,504,624,669,1655,1085
+org.apache.lucene.analysis.pt.TestPortugueseStemFilter=1264,1229,1397,1195,1292,2315
+org.apache.lucene.analysis.query.QueryAutoStopWordAnalyzerTest=206,272,382,330,590,215
+org.apache.lucene.analysis.reverse.TestReverseStringFilter=525,278,426,417,299,1030
+org.apache.lucene.analysis.ro.TestRomanianAnalyzer=966,597,972,684,1142,1590
+org.apache.lucene.analysis.ru.TestRussianAnalyzer=612,725,520,454,791,1700
+org.apache.lucene.analysis.ru.TestRussianLetterTokenizer=6,7,10,10,4,299
+org.apache.lucene.analysis.ru.TestRussianLightStemFilter=438,567,485,602,723,1870
+org.apache.lucene.analysis.shingle.ShingleAnalyzerWrapperTest=657,645,557,594,917,840
+org.apache.lucene.analysis.shingle.ShingleFilterTest=732,782,903,813,2519,8333
+org.apache.lucene.analysis.sinks.DateRecognizerSinkTokenizerTest=40,20,44,15,14,335
+org.apache.lucene.analysis.sinks.TestTeeSinkTokenFilter=101,70,174,32,321,70
+org.apache.lucene.analysis.sinks.TokenRangeSinkTokenizerTest=3,6,4,6,4,10
+org.apache.lucene.analysis.sinks.TokenTypeSinkTokenizerTest=48,10,6,17,15,10
+org.apache.lucene.analysis.snowball.TestSnowball=44,97,41,166,27,120
+org.apache.lucene.analysis.snowball.TestSnowballVocab=3614,3519,3640,3611,4575,7073
+org.apache.lucene.analysis.sv.TestSwedishAnalyzer=576,1216,1640,697,581,4070
+org.apache.lucene.analysis.sv.TestSwedishLightStemFilter=372,328,682,377,4413,2260
+org.apache.lucene.analysis.synonym.TestSolrSynonymParser=16,31,15,33,29,110
+org.apache.lucene.analysis.synonym.TestSynonymMapFilter=1614,1807,1181,1745,907,24940
+org.apache.lucene.analysis.synonym.TestWordnetSynonymParser=7,36,35,7,33,390
+org.apache.lucene.analysis.th.TestThaiAnalyzer=477,454,489,434,743,7038
+org.apache.lucene.analysis.tokenattributes.TestCharTermAttributeImpl=964,788,1020,653,650,1254
+org.apache.lucene.analysis.tokenattributes.TestSimpleAttributeImpl=5,5,3,4,4,6
+org.apache.lucene.analysis.tr.TestTurkishAnalyzer=656,857,623,697,973,1985
+org.apache.lucene.analysis.tr.TestTurkishLowerCaseFilter=7,7,6,7,7,59
+org.apache.lucene.analysis.uima.UIMABaseAnalyzerTest=6723,3808
+org.apache.lucene.analysis.uima.UIMATypeAwareAnalyzerTest=2287,6515
+org.apache.lucene.analysis.uima.ae.BasicAEProviderTest=368,378
+org.apache.lucene.analysis.uima.ae.OverridingParamsAEProviderTest=1842,1578
+org.apache.lucene.analysis.util.TestCharArrayIterator=179,262,165,197,147,185
+org.apache.lucene.analysis.util.TestCharArrayMap=70,133,99,92,134,210
+org.apache.lucene.analysis.util.TestCharArraySet=43,42,52,41,39,129
+org.apache.lucene.analysis.util.TestCharTokenizers=880,1051,979,734,906,2057
+org.apache.lucene.analysis.util.TestCharacterUtils=17,16,13,13,12,40
+org.apache.lucene.analysis.util.TestSegmentingTokenizerBase=705,748,931,1103,1144
+org.apache.lucene.analysis.util.TestWordlistLoader=7,6,13,7,9,15
+org.apache.lucene.analysis.wikipedia.WikipediaTokenizerTest=766,706,816,649,677,3740
+org.apache.lucene.benchmark.byTask.TestPerfTasksLogic=6924,7297,7622,7434,8195,8831
+org.apache.lucene.benchmark.byTask.TestPerfTasksParse=173,110,230,139,1453,1205
+org.apache.lucene.benchmark.byTask.feeds.DocMakerTest=319,392,327,275,318,838
+org.apache.lucene.benchmark.byTask.feeds.LineDocSourceTest=1097,1408,2193,2224,1720,3454
+org.apache.lucene.benchmark.byTask.feeds.TrecContentSourceTest=114,110,89,116,356,150
+org.apache.lucene.benchmark.byTask.feeds.demohtml.TestHtmlParser=261,254,244,282,49,330
+org.apache.lucene.benchmark.byTask.tasks.CreateIndexTaskTest=607,591,631,512,523,1078
+org.apache.lucene.benchmark.byTask.tasks.PerfTaskTest=194,212,224,182,781,256
+org.apache.lucene.benchmark.byTask.tasks.SearchWithSortTaskTest=75,109,86,103,115,342
+org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTaskTest=443,616,929,364,470,657
+org.apache.lucene.benchmark.byTask.tasks.alt.AltPackageTaskTest=83,68,80,104,320,95
+org.apache.lucene.benchmark.byTask.utils.StreamUtilsTest=406,412,380,379,163,395
+org.apache.lucene.benchmark.byTask.utils.TestConfig=189,190,179,208,5,14
+org.apache.lucene.benchmark.quality.TestQualityRun=3117,1965,3256,3455,3779,3937
+org.apache.lucene.codecs.appending.TestAppendingCodec=18,55,26,28,96,20
+org.apache.lucene.codecs.intblock.TestIntBlockCodec=9,12,9,9,9,11
+org.apache.lucene.codecs.lucene3x.TestImpersonation=2,2,2,3,2,34
+org.apache.lucene.codecs.lucene3x.TestSurrogates=735,321,102,370,812,756
+org.apache.lucene.codecs.lucene3x.TestTermInfosReaderIndex=170,334,143,293,667,1017
+org.apache.lucene.codecs.lucene40.TestBitVector=653,739,937,911,802,1458
+org.apache.lucene.codecs.lucene40.TestDocValues=462,425,353,286
+org.apache.lucene.codecs.lucene40.TestReuseDocsEnum=554,620,247,460,562,517
+org.apache.lucene.codecs.lucene40.values.TestDocValues=200,346
+org.apache.lucene.codecs.perfield.TestPerFieldPostingsFormat=1142,741,765,1638,319,432
+org.apache.lucene.codecs.pulsing.Test10KPulsings=4,4,2,2,2,3
+org.apache.lucene.codecs.pulsing.TestPulsingReuse=10,53,10,9,16,310
+org.apache.lucene.collation.TestCollationKeyAnalyzer=1456,1241,790,206,789,440
+org.apache.lucene.collation.TestCollationKeyFilter=478,425,596,668,588,335
+org.apache.lucene.collation.TestICUCollationKeyAnalyzer=1340,681,3304,1934,1429,465
+org.apache.lucene.collation.TestICUCollationKeyFilter=1100,1118,1489,927,911,935
+org.apache.lucene.demo.TestDemo=531,969,964,985,533,597
+org.apache.lucene.document.TestBinaryDocument=12,11,10,11,26,15
+org.apache.lucene.document.TestDateTools=9,11,18,19,14,27
+org.apache.lucene.document.TestDocument=191,36,36,451,53,52
+org.apache.lucene.facet.enhancements.EnhancementsPayloadIteratorTest=59,40,66,274,57,180
+org.apache.lucene.facet.enhancements.TwoEnhancementsTest=115,79,203,89,503,61
+org.apache.lucene.facet.enhancements.association.AssociationPropertyTest=8,7,6,8,10,25
+org.apache.lucene.facet.enhancements.association.CustomAssociationPropertyTest=33,40,42,28,91,180
+org.apache.lucene.facet.enhancements.params.DefaultEnhancementsIndexingParamsTest=4,7,4,5,5,224
+org.apache.lucene.facet.example.TestAdaptiveExample=52,30,41,67,22,32
+org.apache.lucene.facet.example.TestAssociationExample=234,368,263,249,124,199
+org.apache.lucene.facet.example.TestMultiCLExample=167,145,137,271,17,654
+org.apache.lucene.facet.example.TestSimpleExample=54,56,71,78,55,206
+org.apache.lucene.facet.index.CategoryContainerTest=28,224,33,48,55,123
+org.apache.lucene.facet.index.CategoryListPayloadStreamTest=5,5,5,5,5,15
+org.apache.lucene.facet.index.FacetsPayloadProcessorProviderTest=593,929,672,587,2909,1345
+org.apache.lucene.facet.index.attributes.CategoryAttributeImplTest=7,10,8,8,10,75
+org.apache.lucene.facet.index.attributes.CategoryAttributesIterableTest=4,5,4,5,1,105
+org.apache.lucene.facet.index.categorypolicy.OrdinalPolicyTest=370,1572,853,415,1606,1359
+org.apache.lucene.facet.index.categorypolicy.PathPolicyTest=1568,547,899,708,885,103
+org.apache.lucene.facet.index.params.CategoryListParamsTest=6,7,7,7,12,17
+org.apache.lucene.facet.index.params.DefaultFacetIndexingParamsTest=9,7,7,5,7,15
+org.apache.lucene.facet.index.params.PerDimensionIndexingParamsTest=16,17,18,15,8,140
+org.apache.lucene.facet.index.streaming.CategoryAttributesStreamTest=9,8,12,9,3,9
+org.apache.lucene.facet.index.streaming.CategoryParentsStreamTest=98,129,127,166,107,112
+org.apache.lucene.facet.index.streaming.CategoryTokenizerTest=311,115,29,28,15,20
+org.apache.lucene.facet.search.AdaptiveAccumulatorTest=22499,11832,13654,10852,35241,17902
+org.apache.lucene.facet.search.CategoryListIteratorTest=307,413,284,289,114,53
+org.apache.lucene.facet.search.DrillDownTest=88,94,148,94,42,92
+org.apache.lucene.facet.search.SamplingWrapperTest=23585,14646,14012,15905,33551,17013
+org.apache.lucene.facet.search.TestCategoryListCache=57,58,59,123,582,48
+org.apache.lucene.facet.search.TestFacetArrays=3,3,2,3,2,82
+org.apache.lucene.facet.search.TestFacetsAccumulatorWithComplement=198,186,249,181,79,768
+org.apache.lucene.facet.search.TestMultipleCategoryLists=574,343,326,354,210,614
+org.apache.lucene.facet.search.TestScoredDocIdCollector=125,104,59,68,120,145
+org.apache.lucene.facet.search.TestTopKInEachNodeResultHandler=2707,605,816,972,455,989
+org.apache.lucene.facet.search.TestTopKResultsHandler=298,565,254,1042,510,532
+org.apache.lucene.facet.search.TestTopKResultsHandlerRandom=9714,7837,8206,14089,4201,5412
+org.apache.lucene.facet.search.TestTotalFacetCounts=287,1307,202,271,88,1423
+org.apache.lucene.facet.search.TestTotalFacetCountsCache=1568,3359,3670,3162,3095,1322
+org.apache.lucene.facet.search.association.AssociationsFacetRequestTest=303,112,112,246,120,87
+org.apache.lucene.facet.search.params.FacetRequestTest=63,47,43,74,49,25
+org.apache.lucene.facet.search.params.FacetSearchParamsTest=32,307,40,45,25,573
+org.apache.lucene.facet.search.params.MultiIteratorsPerCLParamsTest=56,56,108,71,33,123
+org.apache.lucene.facet.search.sampling.SamplingAccumulatorTest=17217,17997,16760,19828,33173,15443
+org.apache.lucene.facet.taxonomy.TestCategoryPath=296,287,268,268,84,610
+org.apache.lucene.facet.taxonomy.TestTaxonomyCombined=1924,2478,1944,2179,1228,2005
+org.apache.lucene.facet.taxonomy.directory.TestAddTaxonomies=592,571,2415,1608,3067,1561
+org.apache.lucene.facet.taxonomy.directory.TestDirectoryTaxonomyReader=905,996,835,943,136,438
+org.apache.lucene.facet.taxonomy.directory.TestDirectoryTaxonomyWriter=172,110,89,276,156,393
+org.apache.lucene.facet.taxonomy.directory.TestIndexClose=2324,3257,2841,2855,4987,1320
+org.apache.lucene.facet.taxonomy.writercache.cl2o.TestCharBlockArray=1094,1019,1165,1022,585,2117
+org.apache.lucene.facet.taxonomy.writercache.cl2o.TestCompactLabelToOrdinal=891,813,1331,836,754,928
+org.apache.lucene.facet.util.TestScoredDocIDsUtils=1435,1312,1768,993,1130,1569
+org.apache.lucene.index.Test2BPostings=1,1,1,2,1,2
+org.apache.lucene.index.Test2BTerms=2,3,2,2,4,3
+org.apache.lucene.index.TestAddIndexes=5656,8210,11930,11938,12789,3705
+org.apache.lucene.index.TestAtomicUpdate=2803,3435,1655,3604,3545,3787
+org.apache.lucene.index.TestBackwardsCompatibility=6788,6173,6638,7781,13854,5551
+org.apache.lucene.index.TestBinaryTerms=28,52,27,65,122,28
+org.apache.lucene.index.TestByteSlices=2116,2102,2677,2579,2281,4823
+org.apache.lucene.index.TestCheckIndex=23,23,13,414,434,130
+org.apache.lucene.index.TestCodecs=342,342,191,774,571,273
+org.apache.lucene.index.TestCompoundFile=2800,4008,4823,2880,2807,2756
+org.apache.lucene.index.TestConcurrentMergeScheduler=804,1182,1384,3413,1822,595
+org.apache.lucene.index.TestConsistentFieldNumbers=802,567,391,346,1390,499
+org.apache.lucene.index.TestCrash=441,624,261,491,1553,1050
+org.apache.lucene.index.TestCrashCausesCorruptIndex=157,240,144,213,458,120
+org.apache.lucene.index.TestCustomNorms=711,368,588,216,789,159
+org.apache.lucene.index.TestDeletionPolicy=8116,4136,5647,4380,4235,4265
+org.apache.lucene.index.TestDirectoryReader=1986,5251,1396,723,813,1540
+org.apache.lucene.index.TestDirectoryReaderReopen=4353,1720
+org.apache.lucene.index.TestDoc=1034,284,340,315,565,536
+org.apache.lucene.index.TestDocCount=34,58,33,33,353,51
+org.apache.lucene.index.TestDocTermOrds=149,266,156,160,1326,85
+org.apache.lucene.index.TestDocValuesIndexing=5176,2982,215,3811,3242,1539
+org.apache.lucene.index.TestDocsAndPositions=1747,1950,1671,1495,779,379
+org.apache.lucene.index.TestDocumentWriter=185,617,472,256,146,235
+org.apache.lucene.index.TestDocumentsWriterDeleteQueue=367,923,523,375,1085,111
+org.apache.lucene.index.TestDuelingCodecs=4296,9589,3472,5808,3645,6374
+org.apache.lucene.index.TestFieldInfos=6,6,4,4,5,7
+org.apache.lucene.index.TestFieldsReader=750,7225,569,600,974,810
+org.apache.lucene.index.TestFilterAtomicReader=97,137
+org.apache.lucene.index.TestFilterIndexReader=24,16,55,19
+org.apache.lucene.index.TestFlex=308,764,1203,291,265,149
+org.apache.lucene.index.TestFlushByRamOrCountsPolicy=7133,12285,6734,7873,3506,4316
+org.apache.lucene.index.TestForTooMuchCloning=185,316,329,124,1820,1067
+org.apache.lucene.index.TestForceMergeForever=756,2368,739,425,2869,1932
+org.apache.lucene.index.TestIndexCommit=2,3,17,2,2,8
+org.apache.lucene.index.TestIndexFileDeleter=29,109,24,29,30,93
+org.apache.lucene.index.TestIndexInput=3,3,5,3,2,337
+org.apache.lucene.index.TestIndexReader=1960,2129,2413,1600
+org.apache.lucene.index.TestIndexReaderReopen=1864,2585,2300,1574
+org.apache.lucene.index.TestIndexSplitter=1331,1457,1517,1413,1437,1630
+org.apache.lucene.index.TestIndexWriter=6282,11908,12177,8100,4809,8940
+org.apache.lucene.index.TestIndexWriterCommit=2366,1676,2636,2646,2383,1154
+org.apache.lucene.index.TestIndexWriterConfig=7,7,7,12,8,17
+org.apache.lucene.index.TestIndexWriterDelete=2281,2558,1979,3413,3524,1779
+org.apache.lucene.index.TestIndexWriterExceptions=3568,4436,4135,3765,6833,10861
+org.apache.lucene.index.TestIndexWriterForceMerge=2330,2051,2361,2155,4869,2636
+org.apache.lucene.index.TestIndexWriterLockRelease=10,8,101,6,6,11
+org.apache.lucene.index.TestIndexWriterMergePolicy=3397,2502,5830,3046,2464,2072
+org.apache.lucene.index.TestIndexWriterMerging=2561,3549,5304,4152,8030,3091
+org.apache.lucene.index.TestIndexWriterNRTIsCurrent=634,556,680,1152,1742,873
+org.apache.lucene.index.TestIndexWriterOnDiskFull=867,1106,335,2372,509,1599
+org.apache.lucene.index.TestIndexWriterOnJRECrash=3,4,3,3,2,3
+org.apache.lucene.index.TestIndexWriterReader=6795,15686,11066,16046,11674,19233
+org.apache.lucene.index.TestIndexWriterUnicode=1721,2844,1810,2625,2442,2611
+org.apache.lucene.index.TestIndexWriterWithThreads=7079,4587,9820,5817,5874,4717
+org.apache.lucene.index.TestIndexableField=382,1181,634,1124,1582,110
+org.apache.lucene.index.TestIsCurrent=11,12,7,13,16,29
+org.apache.lucene.index.TestLazyProxSkipping=47,750,403,549,99,3851
+org.apache.lucene.index.TestLongPostings=1844,3316,1490,1450,1608,4027
+org.apache.lucene.index.TestMaxTermFrequency=1081,1055,573,974,812,149
+org.apache.lucene.index.TestMixedCodecs=1293,1986,1292,9,1985,1328
+org.apache.lucene.index.TestMultiFields=268,88,179,1371,219,145
+org.apache.lucene.index.TestMultiLevelSkipList=341,117,186,268,142,201
+org.apache.lucene.index.TestMultiPassIndexSplitter=742,711,832,773,881,864
+org.apache.lucene.index.TestMultiReader=687,686,580,568
+org.apache.lucene.index.TestNRTReaderWithThreads=1493,2447,3215,1192,2467,2221
+org.apache.lucene.index.TestNRTThreads=5816,4106,3114,48,4373,5754
+org.apache.lucene.index.TestNeverDelete=2035,2120,1066,1972,2137,1247
+org.apache.lucene.index.TestNewestSegment=5,5,5,4,4,10
+org.apache.lucene.index.TestNoDeletionPolicy=297,64,59,58,140,54
+org.apache.lucene.index.TestNoMergePolicy=5,7,6,5,7,12
+org.apache.lucene.index.TestNoMergeScheduler=5,5,4,5,7,95
+org.apache.lucene.index.TestNorms=5100,1844,6351,2084,10684,2256
+org.apache.lucene.index.TestOmitNorms=1194,1404,2306,3802,795,2799
+org.apache.lucene.index.TestOmitPositions=369,229,82,130,137,120
+org.apache.lucene.index.TestOmitTf=1176,841,685,1187,1272,1167
+org.apache.lucene.index.TestPKIndexSplitter=937,882,857,788,921,1014
+org.apache.lucene.index.TestParallelAtomicReader=88,135
+org.apache.lucene.index.TestParallelCompositeReader=1632,649
+org.apache.lucene.index.TestParallelReader=115,87,70,204
+org.apache.lucene.index.TestParallelReaderEmptyIndex=39,64,56,31,127,98
+org.apache.lucene.index.TestParallelTermEnum=37,54,11,18,18,55
+org.apache.lucene.index.TestPayloadProcessorProvider=607,266,359,738,344,263
+org.apache.lucene.index.TestPayloads=95,696,61,67,213,315
+org.apache.lucene.index.TestPerSegmentDeletes=32,35,37,56,46,736
+org.apache.lucene.index.TestPersistentSnapshotDeletionPolicy=3713,3673,2795,2904,2385,2434
+org.apache.lucene.index.TestPostingsOffsets=39,33,1867,854,561,265
+org.apache.lucene.index.TestPrefixCodedTerms=357,210,411,646,366,562
+org.apache.lucene.index.TestRandomStoredFields=931,1020,3080,967,679,1555
+org.apache.lucene.index.TestReaderClosed=6,33,7,6,11,22
+org.apache.lucene.index.TestRollback=9,8,10,13,12,8
+org.apache.lucene.index.TestRollingUpdates=2867,832,724,1204,1141,1318
+org.apache.lucene.index.TestSameTokenSamePosition=64,34,30,43,23,66
+org.apache.lucene.index.TestSegmentMerger=348,472,269,819,463,548
+org.apache.lucene.index.TestSegmentReader=926,1133,1043,2495,1848,539
+org.apache.lucene.index.TestSegmentTermDocs=7228,843,10592,5894,628,1026
+org.apache.lucene.index.TestSegmentTermEnum=24,33,18,18,22,28
+org.apache.lucene.index.TestSizeBoundedForceMerge=154,153,40,182,127,317
+org.apache.lucene.index.TestSnapshotDeletionPolicy=1564,1644,1400,1354,2429,1607
+org.apache.lucene.index.TestStressAdvance=2038,1563,2260,1950,2734,885
+org.apache.lucene.index.TestStressIndexing=2026,2479,1748,1294,1984,1558
+org.apache.lucene.index.TestStressIndexing2=379,453,362,552,1449,436
+org.apache.lucene.index.TestStressNRT=967,1891,309,21646,2021,2231
+org.apache.lucene.index.TestSumDocFreq=130,126,157,42,137,525
+org.apache.lucene.index.TestTerm=2,3,2,3,230,4
+org.apache.lucene.index.TestTermVectorsReader=42,54,69,79,92,139
+org.apache.lucene.index.TestTermVectorsWriter=120,453,140,140,264,180
+org.apache.lucene.index.TestTermdocPerf=2,4,3,2,2,10
+org.apache.lucene.index.TestTermsEnum=4912,5669,5943,5380,4535,4739
+org.apache.lucene.index.TestTermsEnum2=1406,1819,3517,2180,547,430
+org.apache.lucene.index.TestThreadedForceMerge=762,750,1501,1700,1139,732
+org.apache.lucene.index.TestTieredMergePolicy=2463,1015,1707,1465,2645,1632
+org.apache.lucene.index.TestTransactionRollback=112,115,119,200,631,772
+org.apache.lucene.index.TestTransactions=623,800,718,779,1580,689
+org.apache.lucene.index.TestTypePromotion=3549,34,2525,35,4727,855
+org.apache.lucene.index.TestUniqueTermCount=29,42,18,25,22,34
+org.apache.lucene.index.memory.MemoryIndexTest=4979,4492,4902,4646,5152,3996
+org.apache.lucene.misc.SweetSpotSimilarityTest=315,312,308,336,345,367
+org.apache.lucene.misc.TestHighFreqTerms=1217,397,527,636,514,590
+org.apache.lucene.queries.BooleanFilterTest=908,1097,970,728,478,1003
+org.apache.lucene.queries.BoostingQueryTest=216,201,213,197,5,17
+org.apache.lucene.queries.ChainedFilterTest=1250,1188,1264,1057,2016,1883
+org.apache.lucene.queries.TermsFilterTest=501,684,516,487,107,576
+org.apache.lucene.queries.TestCustomScoreQuery=1913,2408,3202,2145,2669,7237
+org.apache.lucene.queries.function.TestFieldScoreQuery=515,402,500,420,974,418
+org.apache.lucene.queries.function.TestOrdValues=563,644,648,670,661,352
+org.apache.lucene.queries.mlt.TestMoreLikeThis=104,46,74,103,423,44
+org.apache.lucene.queryparser.analyzing.TestAnalyzingQueryParser=24,16,19,26,17,71
+org.apache.lucene.queryparser.classic.TestMultiAnalyzer=20,28,20,16,29,369
+org.apache.lucene.queryparser.classic.TestMultiFieldQueryParser=270,287,289,181,680,210
+org.apache.lucene.queryparser.classic.TestMultiPhraseQueryParsing=6,5,8,11,22,26
+org.apache.lucene.queryparser.classic.TestQueryParser=1152,998,1010,1173,580,539
+org.apache.lucene.queryparser.complexPhrase.TestComplexPhraseQuery=126,121,93,286,75,103
+org.apache.lucene.queryparser.ext.TestExtendableQueryParser=611,526,511,566,870,1217
+org.apache.lucene.queryparser.ext.TestExtensions=35,66,36,55,16,32
+org.apache.lucene.queryparser.flexible.core.builders.TestQueryTreeBuilder=45,8,7,9,8,14
+org.apache.lucene.queryparser.flexible.core.nodes.TestQueryNode=210,263,231,222,8,60
+org.apache.lucene.queryparser.flexible.messages.TestNLS=56,23,30,33,22,308
+org.apache.lucene.queryparser.flexible.precedence.TestPrecedenceQueryParser=387,476,528,392,124,549
+org.apache.lucene.queryparser.flexible.spans.TestSpanQueryParser=18,17,46,23,334,103
+org.apache.lucene.queryparser.flexible.spans.TestSpanQueryParserSimpleSample=9,27,12,10,28,15
+org.apache.lucene.queryparser.flexible.standard.TestMultiAnalyzerQPHelper=23,74,20,22,103,442
+org.apache.lucene.queryparser.flexible.standard.TestMultiFieldQPHelper=198,96,96,116,146,322
+org.apache.lucene.queryparser.flexible.standard.TestNumericQueryParser=556,462,506,612,401,704
+org.apache.lucene.queryparser.flexible.standard.TestQPHelper=763,803,1054,799,464,337
+org.apache.lucene.queryparser.surround.query.SrndQueryTest=461,516,420,424,54,29
+org.apache.lucene.queryparser.surround.query.Test01Exceptions=302,221,283,220,23,15
+org.apache.lucene.queryparser.surround.query.Test02Boolean=396,389,336,338,252,897
+org.apache.lucene.queryparser.surround.query.Test03Distance=1024,958,1189,1165,1414,2262
+org.apache.lucene.queryparser.xml.TestParser=1149,1577,1520,1789,2335,2309
+org.apache.lucene.queryparser.xml.TestQueryTemplateManager=401,293,332,279,353,882
+org.apache.lucene.queryparser.xml.builders.TestNumericRangeFilterBuilder=93,112,127,115,97,135
+org.apache.lucene.queryparser.xml.builders.TestNumericRangeQueryBuilder=62,56,66,65,193,96
+org.apache.lucene.sandbox.queries.DuplicateFilterTest=678,606,654,565,699,748
+org.apache.lucene.sandbox.queries.FuzzyLikeThisQueryTest=542,701,598,600,173,224
+org.apache.lucene.sandbox.queries.TestSlowCollationMethods=1471,1366,1464,1921,1697,2186
+org.apache.lucene.sandbox.queries.regex.TestJakartaRegexpCapabilities=272,222,211,234,6,25
+org.apache.lucene.sandbox.queries.regex.TestRegexQuery=479,531,452,439,741,886
+org.apache.lucene.sandbox.queries.regex.TestSpanRegexQuery=65,95,93,49,448,531
+org.apache.lucene.search.MultiCollectorTest=5,4,6,6,7,49
+org.apache.lucene.search.TestAutomatonQuery=320,204,62,61,668,399
+org.apache.lucene.search.TestAutomatonQueryUnicode=13,9,13,19,79,15
+org.apache.lucene.search.TestBoolean2=4798,1549,5126,5695,4014,17340
+org.apache.lucene.search.TestBooleanMinShouldMatch=427,1121,631,1473,1326,1728
+org.apache.lucene.search.TestBooleanOr=330,573,679,1027,455,993
+org.apache.lucene.search.TestBooleanQuery=135,100,289,120,182,72
+org.apache.lucene.search.TestBooleanScorer=61,14,21,28,116,14
+org.apache.lucene.search.TestCachingCollector=10,11,11,13,282,14
+org.apache.lucene.search.TestCachingWrapperFilter=103,78,25,31,25,180
+org.apache.lucene.search.TestComplexExplanations=1822,983,1373,1172,1204,1592
+org.apache.lucene.search.TestComplexExplanationsOfNonMatches=77,37,35,38,66,146
+org.apache.lucene.search.TestConstantScoreQuery=34,13,21,12,15,13
+org.apache.lucene.search.TestCustomSearcherSort=683,812,843,990,1451,1367
+org.apache.lucene.search.TestDateFilter=28,15,18,23,39,31
+org.apache.lucene.search.TestDateSort=22,13,15,11,19,10
+org.apache.lucene.search.TestDisjunctionMaxQuery=2407,930,1474,1290,1400,600
+org.apache.lucene.search.TestDocBoost=8,7,8,13,8,40
+org.apache.lucene.search.TestDocIdSet=25,8,12,12,9,18
+org.apache.lucene.search.TestDocValuesScoring=36,26,10,44,59,220
+org.apache.lucene.search.TestElevationComparator=18,35,18,25,391,110
+org.apache.lucene.search.TestExplanations=12,10,9,7,10,12
+org.apache.lucene.search.TestFieldCache=3264,786,664,502,1150,1279
+org.apache.lucene.search.TestFieldCacheRangeFilter=497,690,196,660,483,237
+org.apache.lucene.search.TestFieldCacheRewriteMethod=1266,875,1095,1141,2306,1079
+org.apache.lucene.search.TestFieldCacheTermsFilter=12,13,10,10,22,19
+org.apache.lucene.search.TestFieldValueFilter=31,27,36,28,90,22
+org.apache.lucene.search.TestFilteredQuery=240,304,323,432,241,280
+org.apache.lucene.search.TestFilteredSearch=33,25,41,39,264,23
+org.apache.lucene.search.TestFuzzyQuery=154,104,57,100,893,238
+org.apache.lucene.search.TestFuzzyQuery2=1099,450,1117,514,220,382
+org.apache.lucene.search.TestMatchAllDocsQuery=11,9,33,15,26,71
+org.apache.lucene.search.TestMultiPhraseQuery=504,833,826,791,245,210
+org.apache.lucene.search.TestMultiTermConstantScore=426,186,137,124,419,855
+org.apache.lucene.search.TestMultiTermQueryRewrites=24,22,15,45,26,27
+org.apache.lucene.search.TestMultiThreadTermVectors=1232,1360,1148,1634,287,209
+org.apache.lucene.search.TestMultiValuedNumericRangeQuery=1131,990,2837,817,896,420
+org.apache.lucene.search.TestNGramPhraseQuery=5,4,4,4,4,5
+org.apache.lucene.search.TestNRTManager=2556,60,5400,5146,2727,1729
+org.apache.lucene.search.TestNot=18,8,14,8,10,60
+org.apache.lucene.search.TestNumericRangeQuery32=3513,6453,5145,5126,8550,6148
+org.apache.lucene.search.TestNumericRangeQuery64=17221,12483,22047,7784,15993,6128
+org.apache.lucene.search.TestPhrasePrefixQuery=8,13,30,7,10,10
+org.apache.lucene.search.TestPhraseQuery=3164,6474,5922,4556,1475,1734
+org.apache.lucene.search.TestPositionIncrement=15,19,13,17,30,60
+org.apache.lucene.search.TestPositiveScoresOnlyCollector=5,4,6,17,4,8
+org.apache.lucene.search.TestPrefixFilter=6,8,6,16,16,14
+org.apache.lucene.search.TestPrefixInBooleanQuery=756,635,1199,646,3182,636
+org.apache.lucene.search.TestPrefixQuery=8,9,8,32,10,7
+org.apache.lucene.search.TestPrefixRandom=1826,3314,769,990,925,93
+org.apache.lucene.search.TestQueryWrapperFilter=966,320,1226,3325,449,877
+org.apache.lucene.search.TestRegexpQuery=46,81,45,47,509,130
+org.apache.lucene.search.TestRegexpRandom=110,84,162,164,213,165
+org.apache.lucene.search.TestRegexpRandom2=2429,5399,4815,5980,4176,1425
+org.apache.lucene.search.TestScoreCachingWrappingScorer=6,5,4,6,5,19
+org.apache.lucene.search.TestScorerPerf=574,1215,1022,661,261,478
+org.apache.lucene.search.TestSearchAfter=79,43,86,101,76,76
+org.apache.lucene.search.TestSearchWithThreads=1482,2318,2046,4084,2464,1303
+org.apache.lucene.search.TestSearcherManager=2658,9519,4796,2247,5070,4181
+org.apache.lucene.search.TestShardSearching=6964,4585,3932,5770,4762,4415
+org.apache.lucene.search.TestSimilarity=9,11,8,17,13,9
+org.apache.lucene.search.TestSimilarityProvider=11,9,6,11,12,26
+org.apache.lucene.search.TestSimpleExplanations=683,855,736,3021,1272,1326
+org.apache.lucene.search.TestSimpleExplanationsOfNonMatches=67,60,139,441,131,511
+org.apache.lucene.search.TestSimpleSearchEquivalence=194
+org.apache.lucene.search.TestSloppyPhraseQuery=3752,3070,3444,3542,3855,1790
+org.apache.lucene.search.TestSloppyPhraseQuery2=1943
+org.apache.lucene.search.TestSort=2189,2422,3173,2332,4442,1971
+org.apache.lucene.search.TestSubScorerFreqs=16,18,15,36,13,66
+org.apache.lucene.search.TestTermRangeFilter=565,778,1616,415,256,331
+org.apache.lucene.search.TestTermRangeQuery=45,55,71,65,224,262
+org.apache.lucene.search.TestTermScorer=19,19,47,19,26,46
+org.apache.lucene.search.TestTermVectors=252,408,547,312,738,180
+org.apache.lucene.search.TestTimeLimitingCollector=4680,4452,3286,2701,1733,3343
+org.apache.lucene.search.TestTopDocsCollector=55,55,37,95,512,82
+org.apache.lucene.search.TestTopDocsMerge=3992,4115,1374,3679,4153,3706
+org.apache.lucene.search.TestTopScoreDocCollector=5,7,7,9,7,11
+org.apache.lucene.search.TestWildcard=257,91,61,69,288,68
+org.apache.lucene.search.TestWildcardRandom=93,78,98,92,350,64
+org.apache.lucene.search.grouping.AllGroupHeadsCollectorTest=3657,3132,4805,4304,4269,1910
+org.apache.lucene.search.grouping.AllGroupsCollectorTest=456,430,426,444,514,83
+org.apache.lucene.search.grouping.DistinctValuesCollectorTest=1328
+org.apache.lucene.search.grouping.GroupFacetCollectorTest=4246
+org.apache.lucene.search.grouping.GroupingSearchTest=456
+org.apache.lucene.search.grouping.TestGrouping=5285,5376,5745,5765,6922,6788
+org.apache.lucene.search.highlight.HighlighterPhraseTest=228,161,162,229,131,118
+org.apache.lucene.search.highlight.HighlighterTest=1405,1161,1142,1411,1965,2060
+org.apache.lucene.search.highlight.OffsetLimitTokenFilterTest=11,12,10,11,5,11
+org.apache.lucene.search.highlight.TokenSourcesTest=214,193,158,165,116,157
+org.apache.lucene.search.highlight.custom.HighlightCustomQueryTest=11,7,11,8,21,57
+org.apache.lucene.search.join.TestBlockJoin=3159,4003,2733,3472,3350,3427
+org.apache.lucene.search.join.TestJoinUtil=5935,6234,6195,7757,8756,9496
+org.apache.lucene.search.payloads.TestPayloadExplanations=1410,324,339,314,310,297
+org.apache.lucene.search.payloads.TestPayloadNearQuery=759,613,120,1657,1403,210
+org.apache.lucene.search.payloads.TestPayloadTermQuery=463,1159,2769,695,1112,182
+org.apache.lucene.search.similarities.TestSimilarity2=132,71,101,125,274,237
+org.apache.lucene.search.similarities.TestSimilarityBase=2059,1801,1740,1474,1085,531
+org.apache.lucene.search.spans.TestBasics=9064,7511,8748,9945,9025,8026
+org.apache.lucene.search.spans.TestFieldMaskingSpanQuery=190,1213,467,273,267,202
+org.apache.lucene.search.spans.TestNearSpansOrdered=82,92,47,87,120,39
+org.apache.lucene.search.spans.TestPayloadSpans=755,677,1262,515,3286,571
+org.apache.lucene.search.spans.TestSpanExplanations=582,2817,2582,816,455,1506
+org.apache.lucene.search.spans.TestSpanExplanationsOfNonMatches=51,28,59,75,49,98
+org.apache.lucene.search.spans.TestSpanFirstQuery=44,7,9,11,14,12
+org.apache.lucene.search.spans.TestSpanMultiTermQueryWrapper=63,50,71,37,306,29
+org.apache.lucene.search.spans.TestSpanSearchEquivalence=99
+org.apache.lucene.search.spans.TestSpans=1628,1823,2607,2122,233,186
+org.apache.lucene.search.spans.TestSpansAdvanced=24,59,25,156,37,58
+org.apache.lucene.search.spans.TestSpansAdvanced2=85,104,108,145,210,358
+org.apache.lucene.search.spell.TestDirectSpellChecker=544,604,516,663,695,1019
+org.apache.lucene.search.spell.TestJaroWinklerDistance=7,10,8,7,5,11
+org.apache.lucene.search.spell.TestLevenshteinDistance=6,10,12,9,254,256
+org.apache.lucene.search.spell.TestLuceneDictionary=760,641,666,766,778,359
+org.apache.lucene.search.spell.TestNGramDistance=21,21,17,24,18,100
+org.apache.lucene.search.spell.TestPlainTextDictionary=356,296,269,324,45,554
+org.apache.lucene.search.spell.TestSpellChecker=3390,4686,4593,4629,3952,5968
+org.apache.lucene.search.suggest.LookupBenchmarkTest=14,9,14,12,3,5
+org.apache.lucene.search.suggest.PersistenceTest=41,38,43,42,132,205
+org.apache.lucene.search.suggest.TestBytesRefList=78
+org.apache.lucene.search.suggest.TestHighFrequencyDictionary=398
+org.apache.lucene.search.suggest.TestTermFreqIterator=707
+org.apache.lucene.search.suggest.fst.BytesRefSortersTest=324,331,295,288,294,158
+org.apache.lucene.search.suggest.fst.FSTCompletionTest=624,630,702,584,326,2279
+org.apache.lucene.search.suggest.fst.FloatMagicTest=9,12,15,10,9
+org.apache.lucene.search.suggest.fst.TestSort=3757,4325,4061,4481,4273,4597
+org.apache.lucene.search.suggest.fst.WFSTCompletionTest=1585,968
+org.apache.lucene.search.vectorhighlight.BreakIteratorBoundaryScannerTest=21,21,17,19,12,49
+org.apache.lucene.search.vectorhighlight.FieldPhraseListTest=293,255,256,282,274,887
+org.apache.lucene.search.vectorhighlight.FieldQueryTest=707,593,614,664,141,384
+org.apache.lucene.search.vectorhighlight.FieldTermStackTest=730,744,777,664,300,337
+org.apache.lucene.search.vectorhighlight.IndexTimeSynonymTest=373,406,389,379,398,946
+org.apache.lucene.search.vectorhighlight.ScoreOrderFragmentsBuilderTest=564,574,482,480,463,68
+org.apache.lucene.search.vectorhighlight.SimpleBoundaryScannerTest=8,7,8,10,4,35
+org.apache.lucene.search.vectorhighlight.SimpleFragListBuilderTest=426,367,392,343,864,941
+org.apache.lucene.search.vectorhighlight.SimpleFragmentsBuilderTest=660,713,774,800,296,375
+org.apache.lucene.search.vectorhighlight.SingleFragListBuilderTest=118,86,116,73,594,63
+org.apache.lucene.spatial.DistanceUtilsTest=33,23,28,36,264
+org.apache.lucene.spatial.TestTestFramework=246
+org.apache.lucene.spatial.geohash.TestGeoHashUtils=209,257,250,230,219
+org.apache.lucene.spatial.geometry.TestDistanceUnits=232,217,264,202,15
+org.apache.lucene.spatial.prefix.TestSpatialPrefixField=244
+org.apache.lucene.spatial.prefix.TestTermQueryPrefixGridStrategy=424
+org.apache.lucene.spatial.prefix.tree.SpatialPrefixTreeTest=216
+org.apache.lucene.spatial.tier.TestCartesian=1039,999,1060,1061,929
+org.apache.lucene.spatial.tier.TestDistance=495,539,510,529,510
+org.apache.lucene.spatial.tier.projections.SinusoidalProjectorTest=6,10,15,26,8
+org.apache.lucene.store.TestBufferedIndexInput=2269,1131,5331,3833,2510,2152
+org.apache.lucene.store.TestByteArrayDataInput=3,4,3,2,2,3
+org.apache.lucene.store.TestCopyBytes=558,757,556,650,324,449
+org.apache.lucene.store.TestDirectory=28,24,29,27,28,34
+org.apache.lucene.store.TestFileSwitchDirectory=31,36,73,34,58,42
+org.apache.lucene.store.TestHugeRamFile=859,794,2281,654,1308,1271
+org.apache.lucene.store.TestLock=13,12,13,22,12,15
+org.apache.lucene.store.TestLockFactory=1039,1025,1061,1237,1030,1039
+org.apache.lucene.store.TestMultiMMap=5453,4687,3883,5281,4681,4590
+org.apache.lucene.store.TestNRTCachingDirectory=3631,661,3190,2063,1291,1713
+org.apache.lucene.store.TestRAMDirectory=2038,2258,2197,2227,375,290
+org.apache.lucene.store.TestWindowsMMap=280,135,280,185,50,59
+org.apache.lucene.util.TestArrayUtil=2332,2353,2346,3829,2049,3773
+org.apache.lucene.util.TestAttributeSource=6,7,10,10,10,12
+org.apache.lucene.util.TestBitUtil=3,3,3,4,3,4
+org.apache.lucene.util.TestByteBlockPool=3,3,2,3,2,7
+org.apache.lucene.util.TestBytesRef=4,8,4,4,4,8
+org.apache.lucene.util.TestBytesRefHash=65,50,33,122,56,1277
+org.apache.lucene.util.TestCharsRef=13,12,14,13,44,20
+org.apache.lucene.util.TestCloseableThreadLocal=4,4,4,4,4,4
+org.apache.lucene.util.TestCollectionUtil=1772,1724,1887,1494,1343,1963
+org.apache.lucene.util.TestDoubleBarrelLRUCache=1007,1008,1009,1006,1005,1009
+org.apache.lucene.util.TestFieldCacheSanityChecker=560,559,414,759,2030,452
+org.apache.lucene.util.TestFixedBitSet=627,562,870,559,970,2356
+org.apache.lucene.util.TestIOUtils=4,3,4,5,149,10
+org.apache.lucene.util.TestIdentityHashSet=133
+org.apache.lucene.util.TestIndexableBinaryStringTools=129,352,166,126,732,239
+org.apache.lucene.util.TestJUnitRuleOrder=4,3,2,2
+org.apache.lucene.util.TestNamedSPILoader=3,5,2,5,3,32
+org.apache.lucene.util.TestNumericUtils=1288,1565,1011,910,1203,746
+org.apache.lucene.util.TestOpenBitSet=1607,2194,2600,2196,1461,3886
+org.apache.lucene.util.TestPagedBytes=537,295,199,387,318,1058
+org.apache.lucene.util.TestPriorityQueue=14,25,61,16,20,96
+org.apache.lucene.util.TestRamUsageEstimator=4,2,2,3,4,10
+org.apache.lucene.util.TestRamUsageEstimatorOnWildAnimals=890
+org.apache.lucene.util.TestRecyclingByteBlockAllocator=17,18,26,29,24,19
+org.apache.lucene.util.TestRollingBuffer=126
+org.apache.lucene.util.TestRollingCharBuffer=752
+org.apache.lucene.util.TestSentinelIntSet=132,106,108,113,168,177
+org.apache.lucene.util.TestSetOnce=18,39,15,16,16,20
+org.apache.lucene.util.TestSetupTeardownMethods=24,26,32,25
+org.apache.lucene.util.TestSmallFloat=27,34,31,29,30,66
+org.apache.lucene.util.TestSortedVIntList=21,21,22,27,22,83
+org.apache.lucene.util.TestTwoPhaseCommitTool=8,8,5,8,13,25
+org.apache.lucene.util.TestUnicodeUtil=257,256,314,244,178,211
+org.apache.lucene.util.TestVersion=2,2,2,4,2,17
+org.apache.lucene.util.TestVersionComparator=2,3,4,3,2,5
+org.apache.lucene.util.TestVirtualMethod=6,6,5,4,6,12
+org.apache.lucene.util.TestWeakIdentityMap=531,692,1114,445,535,472
+org.apache.lucene.util.UnsafeByteArrayInputStreamTest=233,253,272,218,23,26
+org.apache.lucene.util.UnsafeByteArrayOutputStreamTest=18,21,18,23,7,74
+org.apache.lucene.util.Vint8Test=12,9,12,15,8,505
+org.apache.lucene.util.automaton.TestBasicOperations=161,81,130,142,365,121
+org.apache.lucene.util.automaton.TestCompiledAutomaton=153,430,39,160,206,102
+org.apache.lucene.util.automaton.TestDeterminism=578,683,836,1021,566,464
+org.apache.lucene.util.automaton.TestDeterminizeLexicon=1174,1123,591,627,426,578
+org.apache.lucene.util.automaton.TestLevenshteinAutomata=662,800,596,463,1239,1713
+org.apache.lucene.util.automaton.TestMinimize=1910,2067,2633,1573,1965,1475
+org.apache.lucene.util.automaton.TestSpecialOperations=55,91,106,102,59,1228
+org.apache.lucene.util.automaton.TestUTF32ToUTF8=1485,1659,2310,788,849,809
+org.apache.lucene.util.collections.ArrayHashMapTest=97,100,33,89,37,95
+org.apache.lucene.util.collections.FloatToObjectMapTest=32,35,31,31,16,96
+org.apache.lucene.util.collections.IntArrayTest=6,6,8,12,12,172
+org.apache.lucene.util.collections.IntHashSetTest=81,59,62,53,47,152
+org.apache.lucene.util.collections.IntToDoubleMapTest=70,70,68,66,21,41
+org.apache.lucene.util.collections.IntToIntMapTest=49,42,34,34,58,135
+org.apache.lucene.util.collections.IntToObjectMapTest=165,28,73,61,36,45
+org.apache.lucene.util.collections.ObjectToFloatMapTest=68,57,79,72,19,47
+org.apache.lucene.util.collections.ObjectToIntMapTest=41,36,44,57,73,133
+org.apache.lucene.util.collections.TestLRUHashMap=8,4,5,4,2,9
+org.apache.lucene.util.encoding.EncodingTest=221,197,174,203,227,265
+org.apache.lucene.util.fst.TestFSTs=11876,220,10497,11092,10118,8691
+org.apache.lucene.util.junitcompat.TestExceptionInBeforeClassHooks=48
+org.apache.lucene.util.junitcompat.TestJUnitRuleOrder=9,40
+org.apache.lucene.util.junitcompat.TestReproduceMessage=61,201
+org.apache.lucene.util.junitcompat.TestSeedFromUncaught=49
+org.apache.lucene.util.junitcompat.TestSetupTeardownChaining=13,105
+org.apache.lucene.util.junitcompat.TestSystemPropertiesInvariantRule=60
+org.apache.lucene.util.packed.TestPackedInts=3696,2679,2803,3121,3115,4577
+org.apache.solr.BasicFunctionalityTest=1500,1334,1232,1102,2943,1626
+org.apache.solr.ConvertedLegacyTest=2435,1872,2131,2620,5835,2139
+org.apache.solr.DisMaxRequestHandlerTest=459,523,446,603,492,3005
+org.apache.solr.EchoParamsTest=93,93,91,81,182,127
+org.apache.solr.MinimalSchemaTest=173,151,162,151,311,214
+org.apache.solr.OutputWriterTest=193,374,199,230,195,191
+org.apache.solr.SampleTest=220,221,233,302,165,146
+org.apache.solr.SolrInfoMBeanTest=1481,434,1136,1501,2498,439
+org.apache.solr.TestDistributedGrouping=14182,13937,13655,14416,38066,17556
+org.apache.solr.TestDistributedSearch=16279,14823,15468,15004,36045,43894
+org.apache.solr.TestGroupingSearch=4797,5620,5990,6053,3933,2905
+org.apache.solr.TestJoin=7374,7450,7478,6796,16137,14055
+org.apache.solr.TestPluginEnable=71,54,56,58,172,66
+org.apache.solr.TestSolrCoreProperties=109,183,86,212,380,167
+org.apache.solr.TestTrie=2342,2345,2368,2558,1844,806
+org.apache.solr.analysis.CommonGramsFilterFactoryTest=21,8,11,20,7,12
+org.apache.solr.analysis.CommonGramsQueryFilterFactoryTest=6,7,6,8,6,12
+org.apache.solr.analysis.DoubleMetaphoneFilterFactoryTest=11,13,7,12,10,7
+org.apache.solr.analysis.LegacyHTMLStripCharFilterTest=194,88,98,159,119,141
+org.apache.solr.analysis.LengthFilterTest=6,5,6,7,2,7
+org.apache.solr.analysis.SnowballPorterFilterFactoryTest=10,11,12,9,13,18
+org.apache.solr.analysis.TestArabicFilters=8,10,11,6,11,18
+org.apache.solr.analysis.TestBeiderMorseFilterFactory=331,303,237,330,250,285
+org.apache.solr.analysis.TestBrazilianStemFilterFactory=6,4,7,17,3,6
+org.apache.solr.analysis.TestBulgarianStemFilterFactory=3,3,4,3,4,12
+org.apache.solr.analysis.TestCJKBigramFilterFactory=4,4,3,4,5,11
+org.apache.solr.analysis.TestCJKTokenizerFactory=4,9,5,8,26,34
+org.apache.solr.analysis.TestCJKWidthFilterFactory=3,5,3,4,2,7
+org.apache.solr.analysis.TestCapitalizationFilterFactory=7,12,8,10,25,41
+org.apache.solr.analysis.TestChineseFilterFactory=4,3,3,2,2,14
+org.apache.solr.analysis.TestChineseTokenizerFactory=6,5,5,6,3,8
+org.apache.solr.analysis.TestCollationKeyFilterFactory=118,93,109,109,74,126
+org.apache.solr.analysis.TestCollationKeyRangeQueries=149,105,164,105,313,122
+org.apache.solr.analysis.TestCzechStemFilterFactory=4,3,3,2,10,5
+org.apache.solr.analysis.TestDelimitedPayloadTokenFilterFactory=4,6,4,4,4,6
+org.apache.solr.analysis.TestDictionaryCompoundWordTokenFilterFactory=15,5,5,8,7,5
+org.apache.solr.analysis.TestElisionFilterFactory=5,8,6,5,10,14
+org.apache.solr.analysis.TestEnglishMinimalStemFilterFactory=2,2,3,3,5,8
+org.apache.solr.analysis.TestFinnishLightStemFilterFactory=10,6,5,9,3,5
+org.apache.solr.analysis.TestFoldingMultitermExtrasQuery=2595
+org.apache.solr.analysis.TestFrenchLightStemFilterFactory=7,3,8,8,11,10
+org.apache.solr.analysis.TestFrenchMinimalStemFilterFactory=3,6,4,3,5,5
+org.apache.solr.analysis.TestGalicianMinimalStemFilterFactory=13,14,14,16,14,16
+org.apache.solr.analysis.TestGalicianStemFilterFactory=22,20,27,23,16,16
+org.apache.solr.analysis.TestGermanLightStemFilterFactory=5,3,6,3,4,6
+org.apache.solr.analysis.TestGermanMinimalStemFilterFactory=4,3,3,3,4,4
+org.apache.solr.analysis.TestGermanNormalizationFilterFactory=3,2,3,4,3,4
+org.apache.solr.analysis.TestGermanStemFilterFactory=5,4,3,4,3,8
+org.apache.solr.analysis.TestGreekLowerCaseFilterFactory=3,2,3,2,7,6
+org.apache.solr.analysis.TestGreekStemFilterFactory=6,5,4,5,7,31
+org.apache.solr.analysis.TestHTMLStripCharFilterFactory=51,6,9,6,38,108
+org.apache.solr.analysis.TestHindiFilters=12,7,8,8,9,25
+org.apache.solr.analysis.TestHungarianLightStemFilterFactory=13,3,7,12,4,5
+org.apache.solr.analysis.TestHunspellStemFilterFactory=7,8,7,8,10,18
+org.apache.solr.analysis.TestHyphenationCompoundWordTokenFilterFactory=80,79,74,84,129,105
+org.apache.solr.analysis.TestICUCollationKeyFilterFactory=1033,876,858,854,705,861
+org.apache.solr.analysis.TestICUCollationKeyRangeQueries=278,273,311,236,2032,1628
+org.apache.solr.analysis.TestICUFoldingFilterFactory=319,291,301,285,18,55
+org.apache.solr.analysis.TestICUNormalizer2FilterFactory=334,323,311,272,301,18
+org.apache.solr.analysis.TestICUTokenizerFactory=156,150,156,493,194,388
+org.apache.solr.analysis.TestICUTransformFilterFactory=984,935,811,837,857,678
+org.apache.solr.analysis.TestIndonesianStemFilterFactory=6,4,3,7,8,6
+org.apache.solr.analysis.TestIrishLowerCaseFilterFactory=5
+org.apache.solr.analysis.TestItalianLightStemFilterFactory=2,3,4,3,3,6
+org.apache.solr.analysis.TestJapaneseBaseFormFilterFactory=230
+org.apache.solr.analysis.TestJapanesePartOfSpeechStopFilterFactory=222
+org.apache.solr.analysis.TestJapaneseTokenizerFactory=192
+org.apache.solr.analysis.TestKStemFilterFactory=103,98,100,109,123,111
+org.apache.solr.analysis.TestKeepFilterFactory=7,5,7,5,6,6
+org.apache.solr.analysis.TestKeywordMarkerFilterFactory=7,5,8,7,7,525
+org.apache.solr.analysis.TestKuromojiBaseFormFilterFactory=385,235,442,485,283
+org.apache.solr.analysis.TestKuromojiPartOfSpeechStopFilterFactory=206,201,195,216,4
+org.apache.solr.analysis.TestKuromojiTokenizerFactory=324,299,13,213,515
+org.apache.solr.analysis.TestLatvianStemFilterFactory=11,8,32,14,5,10
+org.apache.solr.analysis.TestLuceneMatchVersion=154,134,217,136,308,132
+org.apache.solr.analysis.TestMappingCharFilterFactory=3,3,2,4,2,3
+org.apache.solr.analysis.TestMorfologikFilterFactory=457
+org.apache.solr.analysis.TestMultiWordSynonyms=12,10,11,9,12,14
+org.apache.solr.analysis.TestNGramFilters=12,18,12,12,78,70
+org.apache.solr.analysis.TestNorwegianLightStemFilterFactory=9
+org.apache.solr.analysis.TestNorwegianMinimalStemFilterFactory=7
+org.apache.solr.analysis.TestPatternReplaceCharFilterFactory=16,5,8,16,8,34
+org.apache.solr.analysis.TestPatternReplaceFilterFactory=6,5,5,6,2,17
+org.apache.solr.analysis.TestPatternTokenizerFactory=5,5,5,6,2,353
+org.apache.solr.analysis.TestPersianNormalizationFilterFactory=5,5,5,5,2,6
+org.apache.solr.analysis.TestPhoneticFilterFactory=20120,5958,14344,15767,18979,6893
+org.apache.solr.analysis.TestPorterStemFilterFactory=18,4,4,6,5,8
+org.apache.solr.analysis.TestPortugueseLightStemFilterFactory=8,7,8,8,22,5
+org.apache.solr.analysis.TestPortugueseMinimalStemFilterFactory=16,11,22,18,19,45
+org.apache.solr.analysis.TestPortugueseStemFilterFactory=8,8,11,15,30,22
+org.apache.solr.analysis.TestRemoveDuplicatesTokenFilterFactory=2,3,2,1,2,6
+org.apache.solr.analysis.TestReverseStringFilterFactory=3,3,3,2,2,5
+org.apache.solr.analysis.TestReversedWildcardFilterFactory=464,393,542,591,669,1228
+org.apache.solr.analysis.TestRussianFilters=4,5,3,3,4,4
+org.apache.solr.analysis.TestRussianLightStemFilterFactory=10,11,8,8,11,8
+org.apache.solr.analysis.TestShingleFilterFactory=20,15,14,23,13,50
+org.apache.solr.analysis.TestSlowSynonymFilter=18,12,17,14,29,35
+org.apache.solr.analysis.TestSmartChineseFactories=441,474,430,450,296,920
+org.apache.solr.analysis.TestSpanishLightStemFilterFactory=7,4,8,4,5,5
+org.apache.solr.analysis.TestStandardFactories=15,27,19,15,18,23
+org.apache.solr.analysis.TestStemmerOverrideFilterFactory=5,7,4,4,5,12
+org.apache.solr.analysis.TestStempelPolishStemFilterFactory=235,151,176,157,176,605
+org.apache.solr.analysis.TestStopFilterFactory=4,7,5,5,7,43
+org.apache.solr.analysis.TestSwedishLightStemFilterFactory=5,3,8,8,3,16
+org.apache.solr.analysis.TestSynonymFilterFactory=17,10,14,10,15,13
+org.apache.solr.analysis.TestSynonymMap=8,8,7,8,14,15
+org.apache.solr.analysis.TestThaiWordFilterFactory=19,29,28,24,22,93
+org.apache.solr.analysis.TestTrimFilterFactory=10,3,6,3,3,5
+org.apache.solr.analysis.TestTurkishLowerCaseFilterFactory=4,4,8,4,15,5
+org.apache.solr.analysis.TestTypeTokenFilterFactory=12,7,8,7,6,13
+org.apache.solr.analysis.TestUAX29URLEmailTokenizerFactory=51,46,48,67,81,60
+org.apache.solr.analysis.TestWikipediaTokenizerFactory=7,6,6,9,7,15
+org.apache.solr.analysis.TestWordDelimiterFilterFactory=909,900,822,830,1455,914
+org.apache.solr.client.solrj.BasicHttpSolrServerTest=3811
+org.apache.solr.client.solrj.SolrExampleBinaryTest=7643,7512,6962,7091,6928,5165
+org.apache.solr.client.solrj.SolrExceptionTest=367,353,311,358,42,14
+org.apache.solr.client.solrj.SolrQueryTest=16,13,16,14,240,27
+org.apache.solr.client.solrj.TestBatchUpdate=4209,4045,4004,4079,2305,4178
+org.apache.solr.client.solrj.TestLBHttpSolrServer=8879,9830,9004,7846,7490,9731
+org.apache.solr.client.solrj.beans.TestDocumentObjectBinder=26,31,36,36,18,31
+org.apache.solr.client.solrj.embedded.JettyWebappTest=4743,4566,4392,4602,2111,1012
+org.apache.solr.client.solrj.embedded.LargeVolumeBinaryJettyTest=847,976,1062,825,1244,1745
+org.apache.solr.client.solrj.embedded.LargeVolumeEmbeddedTest=1221,1333,1358,1091,1778,1588
+org.apache.solr.client.solrj.embedded.LargeVolumeJettyTest=1319,1275,1364,1296,918,1102
+org.apache.solr.client.solrj.embedded.MergeIndexesEmbeddedTest=717,775,734,667,586,561
+org.apache.solr.client.solrj.embedded.MultiCoreEmbeddedTest=420,415,367,381,314,321
+org.apache.solr.client.solrj.embedded.MultiCoreExampleJettyTest=735,952,968,943,697,1396
+org.apache.solr.client.solrj.embedded.SolrExampleEmbeddedTest=4953,4762,4825,4703,4249,7319
+org.apache.solr.client.solrj.embedded.SolrExampleJettyTest=4491,4580,4337,4405,4428,4926
+org.apache.solr.client.solrj.embedded.SolrExampleStreamingBinaryTest=9544,9591,9796,9684,12707,13200
+org.apache.solr.client.solrj.embedded.SolrExampleStreamingTest=10177,9896,10353,10507,10402,10479
+org.apache.solr.client.solrj.embedded.TestEmbeddedSolrServer=349,546,621,406,1682,501
+org.apache.solr.client.solrj.embedded.TestSolrProperties=403,366,400,357,712,3070
+org.apache.solr.client.solrj.request.TestUpdateRequestCodec=8,6,8,6,5,51
+org.apache.solr.client.solrj.response.AnlysisResponseBaseTest=4,5,5,5,5,25
+org.apache.solr.client.solrj.response.DocumentAnalysisResponseTest=5,5,6,7,3,20
+org.apache.solr.client.solrj.response.FacetFieldTest=4,4,4,6,196,12
+org.apache.solr.client.solrj.response.FieldAnalysisResponseTest=7,3,3,4,2,282
+org.apache.solr.client.solrj.response.QueryResponseTest=29,40,31,29,18,22
+org.apache.solr.client.solrj.response.TermsResponseTest=2499,2741,2620,2502,509,600
+org.apache.solr.client.solrj.response.TestSpellCheckResponse=1118,1091,1060,985,3064,788
+org.apache.solr.client.solrj.util.ClientUtilsTest=4,5,7,2,7,7
+org.apache.solr.cloud.BasicDistributedZkTest=23174,23981,24200,23322,39077,34105
+org.apache.solr.cloud.BasicZkTest=9193,9055,9447,10470,10521,8457
+org.apache.solr.cloud.ChaosMonkeyNothingIsSafeTest=1,0,1,1,0,1
+org.apache.solr.cloud.ChaosMonkeySafeLeaderTest=1,0,0,1,0,1
+org.apache.solr.cloud.CloudStateTest=5,4,4,3,3,13
+org.apache.solr.cloud.CloudStateUpdateTest=11657,12138,13721,13338,18033,9772
+org.apache.solr.cloud.FullSolrCloudDistribCmdsTest=52827,44720,50511,51658,58897,36111
+org.apache.solr.cloud.FullSolrCloudTest=0,1,1,1,1,1
+org.apache.solr.cloud.LeaderElectionIntegrationTest=25451,24310,23188,23681,36677,23255
+org.apache.solr.cloud.LeaderElectionTest=34997,20373,17821,24752,26838,29423
+org.apache.solr.cloud.NodeStateWatcherTest=24232
+org.apache.solr.cloud.OverseerTest=29472,28619,29386,29074,53998,57563
+org.apache.solr.cloud.RecoveryZkTest=25932,27672,26705,26491,23463,29315
+org.apache.solr.cloud.TestHashPartitioner=4678,5325,5437,4308,14818,15647
+org.apache.solr.cloud.TestMultiCoreConfBootstrap=4425
+org.apache.solr.cloud.ZkControllerTest=7251,7097,7069,7015,7480,15652
+org.apache.solr.cloud.ZkNodePropsTest=10,12,9,12,16,4
+org.apache.solr.cloud.ZkSolrClientTest=15964,15673,15688,15912,16082,17076
+org.apache.solr.common.SolrDocumentTest=10,15,19,9,14,72
+org.apache.solr.common.params.ModifiableSolrParamsTest=9,9,10,11,15,38
+org.apache.solr.common.params.SolrParamTest=217,202,212,222,7,21
+org.apache.solr.common.util.ContentStreamTest=220,287,387,183,208,1104
+org.apache.solr.common.util.DOMUtilTest=16,20,16,18,11
+org.apache.solr.common.util.FileUtilsTest=3,3,3,4,3
+org.apache.solr.common.util.IteratorChainTest=11,9,12,14,8,62
+org.apache.solr.common.util.NamedListTest=8,7,3,3,2,6
+org.apache.solr.common.util.TestFastInputStream=7,6,22,4,3,21
+org.apache.solr.common.util.TestHash=44,65,67,53,128,178
+org.apache.solr.common.util.TestJavaBinCodec=110,127,295,157,92,149
+org.apache.solr.common.util.TestNamedListCodec=958,817,1042,878,771,879
+org.apache.solr.common.util.TestSystemIdResolver=4,6,4,4,2
+org.apache.solr.common.util.TestXMLEscaping=10,9,12,9,16,14
+org.apache.solr.core.AlternateDirectoryTest=396,309,349,472,375,404
+org.apache.solr.core.IndexReaderFactoryTest=470,506,635,452,610,371
+org.apache.solr.core.PluginInfoTest=24,34,61,34,34,29
+org.apache.solr.core.RAMDirectoryFactoryTest=2,2,3,3,3,8
+org.apache.solr.core.RequestHandlersTest=409,648,631,717,388,1234
+org.apache.solr.core.ResourceLoaderTest=24,24,29,25,9,11
+org.apache.solr.core.SOLR749Test=299,277,271,341,536,467
+org.apache.solr.core.SolrCoreCheckLockOnStartupTest=769
+org.apache.solr.core.SolrCoreTest=2185,2198,2247,2191,2543,2586
+org.apache.solr.core.TestArbitraryIndexDir=704,441,457,481,1804,449
+org.apache.solr.core.TestBadConfig=4,6,5,7,6,13
+org.apache.solr.core.TestCodecSupport=68,73,71,84,70,108
+org.apache.solr.core.TestConfig=109,108,108,103,135,313
+org.apache.solr.core.TestCoreContainer=1266,1268,1275,1212,5313,3500
+org.apache.solr.core.TestJmxIntegration=1745,1683,1579,1663,1451,824
+org.apache.solr.core.TestJmxMonitoredMap=690,76,184,158,294,104
+org.apache.solr.core.TestLegacyMergeSchedulerPolicyConfig=310,315,294,393,624
+org.apache.solr.core.TestMergePolicyConfig=292,332,284,297,546,415
+org.apache.solr.core.TestPropInject=711,699,619,723,561,774
+org.apache.solr.core.TestPropInjectDefaults=384,418,445,564,1957,390
+org.apache.solr.core.TestQuerySenderListener=450,306,352,303,289,455
+org.apache.solr.core.TestQuerySenderNoQuery=357,327,304,349,472,349
+org.apache.solr.core.TestSolrDeletionPolicy1=609,667,606,668,579,3393
+org.apache.solr.core.TestSolrDeletionPolicy2=1273,364,846,1457,398,2649
+org.apache.solr.core.TestSolrXMLSerializer=46,64,57,46,11,16
+org.apache.solr.core.TestXIncludeConfig=339,298,320,315,399,478
+org.apache.solr.handler.BinaryUpdateRequestHandlerTest=414,454,349,401,1578,516
+org.apache.solr.handler.CSVRequestHandlerTest=1187,377,944,1423,967,569
+org.apache.solr.handler.DocumentAnalysisRequestHandlerTest=732,823,727,751,2409,435
+org.apache.solr.handler.FieldAnalysisRequestHandlerTest=354,363,459,415,989,966
+org.apache.solr.handler.JsonLoaderTest=359,410,340,520,1926,547
+org.apache.solr.handler.MoreLikeThisHandlerTest=2497,2699,2495,2473,435,638
+org.apache.solr.handler.StandardRequestHandlerTest=1569,1623,1609,1603,574,479
+org.apache.solr.handler.TestCSVLoader=589,540,486,489,1444,1570
+org.apache.solr.handler.TestReplicationHandler=22484,24030,23080,23374,39748,23441
+org.apache.solr.handler.XmlUpdateRequestHandlerTest=569,503,731,612,1265,446
+org.apache.solr.handler.XsltUpdateRequestHandlerTest=856,863,892,892,2695,588
+org.apache.solr.handler.admin.CoreAdminHandlerTest=753,871,795,993,933,3415
+org.apache.solr.handler.admin.LogLevelHandlerTest=581
+org.apache.solr.handler.admin.LukeRequestHandlerTest=1118,1473,1204,1189,1329,1464
+org.apache.solr.handler.admin.MBeansHandlerTest=395
+org.apache.solr.handler.admin.ShowFileRequestHandlerTest=1335,1137,1149,1356,837,666
+org.apache.solr.handler.admin.SystemInfoHandlerTest=1,1,1,2,2,16
+org.apache.solr.handler.clustering.ClusteringComponentTest=2498,2606,2458,2459,2417,2645
+org.apache.solr.handler.clustering.DistributedClusteringComponentTest=6889,9315,8222,5929,7126,8239
+org.apache.solr.handler.clustering.carrot2.CarrotClusteringEngineTest=2927,3022,2956,2837,2941,3233
+org.apache.solr.handler.component.BadComponentTest=426,401,526,568,570,300
+org.apache.solr.handler.component.DebugComponentTest=503,575,573,821,996,520
+org.apache.solr.handler.component.DistributedQueryElevationComponentTest=2842
+org.apache.solr.handler.component.DistributedSpellCheckComponentTest=9117,11526,10039,11602,12358,10095
+org.apache.solr.handler.component.DistributedTermsComponentTest=10076,9811,9628,9013,9826,18652
+org.apache.solr.handler.component.QueryElevationComponentTest=4732,4506,4729,4419,3770,2932
+org.apache.solr.handler.component.SearchHandlerTest=495,386,531,403,397,366
+org.apache.solr.handler.component.SpellCheckComponentTest=5842,4847,3472,3600,19878,3323
+org.apache.solr.handler.component.StatsComponentTest=1715,2068,1992,1701,1973,2979
+org.apache.solr.handler.component.TermVectorComponentTest=484,410,388,375,384,635
+org.apache.solr.handler.component.TermsComponentTest=1595,608,1169,2046,567,591
+org.apache.solr.handler.dataimport.TestCachedSqlEntityProcessor=35,32,16,31,10,22
+org.apache.solr.handler.dataimport.TestClobTransformer=5,7,5,5,192,9
+org.apache.solr.handler.dataimport.TestContentStreamDataSource=1990,2014,1987,2000,1650,1626
+org.apache.solr.handler.dataimport.TestContextImpl=6,5,7,6,8,10
+org.apache.solr.handler.dataimport.TestDataConfig=149,146,149,142,145,154
+org.apache.solr.handler.dataimport.TestDateFormatTransformer=6,6,5,5,5,12
+org.apache.solr.handler.dataimport.TestDocBuilder=17,16,16,16,17,54
+org.apache.solr.handler.dataimport.TestDocBuilder2=301,300,292,289,303,383
+org.apache.solr.handler.dataimport.TestDocBuilderThreaded=1341,1346,1369,1311,356
+org.apache.solr.handler.dataimport.TestEntityProcessorBase=5,5,5,4,3,6
+org.apache.solr.handler.dataimport.TestEphemeralCache=159,156,150,152,173,185
+org.apache.solr.handler.dataimport.TestErrorHandling=261,246,239,237,281,307
+org.apache.solr.handler.dataimport.TestEvaluatorBag=6,6,6,7,8,17
+org.apache.solr.handler.dataimport.TestFieldReader=18,16,15,16,5,34
+org.apache.solr.handler.dataimport.TestFileListEntityProcessor=45,46,46,45,52,17
+org.apache.solr.handler.dataimport.TestJdbcDataSource=43,39,39,39,39,51
+org.apache.solr.handler.dataimport.TestLineEntityProcessor=7,7,7,6,5,35
+org.apache.solr.handler.dataimport.TestMailEntityProcessor=200,196,203,209,237,264
+org.apache.solr.handler.dataimport.TestNumberFormatTransformer=13,15,13,13,16,22
+org.apache.solr.handler.dataimport.TestPlainTextEntityProcessor=7,7,7,7,6,9
+org.apache.solr.handler.dataimport.TestRegexTransformer=6,15,10,5,6,17
+org.apache.solr.handler.dataimport.TestScriptTransformer=98,43,44,44,48,87
+org.apache.solr.handler.dataimport.TestSolrEntityProcessorEndToEnd=7135,7080,6979,7025,7498,6086
+org.apache.solr.handler.dataimport.TestSolrEntityProcessorUnit=9,8,9,9,11,16
+org.apache.solr.handler.dataimport.TestSortedMapBackedCache=27,25,26,24,11,7
+org.apache.solr.handler.dataimport.TestSqlEntityProcessor=11,9,9,9,8,11
+org.apache.solr.handler.dataimport.TestSqlEntityProcessor2=260,258,243,264,248,235
+org.apache.solr.handler.dataimport.TestSqlEntityProcessorDelta=579,574,585,581,1522,560
+org.apache.solr.handler.dataimport.TestSqlEntityProcessorDelta2=612,632,477,460,387,377
+org.apache.solr.handler.dataimport.TestSqlEntityProcessorDelta3=328,332,324,320,299,286
+org.apache.solr.handler.dataimport.TestSqlEntityProcessorDeltaPrefixedPk=280,269,274,311,282,207
+org.apache.solr.handler.dataimport.TestTemplateString=3,3,2,4,1,4
+org.apache.solr.handler.dataimport.TestTemplateTransformer=3,3,4,3,3,5
+org.apache.solr.handler.dataimport.TestThreaded=179,180,179,178,159
+org.apache.solr.handler.dataimport.TestTikaEntityProcessor=2454,2286,2330,2375,2640,2424
+org.apache.solr.handler.dataimport.TestURLDataSource=4,6,4,3,2,6
+org.apache.solr.handler.dataimport.TestVariableResolver=13,11,13,12,24,26
+org.apache.solr.handler.dataimport.TestXPathEntityProcessor=413,390,397,423,438,400
+org.apache.solr.handler.dataimport.TestXPathRecordReader=36,36,35,36,45,124
+org.apache.solr.handler.extraction.ExtractingRequestHandlerTest=3112,3082,3114,3071,3087,3351
+org.apache.solr.highlight.FastVectorHighlighterTest=647,632,609,492,486,906
+org.apache.solr.highlight.HighlighterConfigTest=353,344,288,583,493,546
+org.apache.solr.highlight.HighlighterTest=1014,851,980,1013,1788,1411
+org.apache.solr.internal.csv.CSVParserTest=28
+org.apache.solr.internal.csv.CSVPrinterTest=642
+org.apache.solr.internal.csv.CSVStrategyTest=23
+org.apache.solr.internal.csv.CSVUtilsTest=12
+org.apache.solr.internal.csv.CharBufferTest=17
+org.apache.solr.internal.csv.ExtendedBufferedReaderTest=12
+org.apache.solr.internal.csv.writer.CSVConfigGuesserTest=5
+org.apache.solr.internal.csv.writer.CSVConfigTest=5
+org.apache.solr.internal.csv.writer.CSVFieldTest=3
+org.apache.solr.internal.csv.writer.CSVWriterTest=6
+org.apache.solr.request.JSONWriterTest=427,442,382,731,472,709
+org.apache.solr.request.SimpleFacetsTest=3676,4663,4225,3916,12265,3945 +org.apache.solr.request.TestBinaryResponseWriter=664,787,674,977,1022,789 +org.apache.solr.request.TestFaceting=6576,7198,7028,7421,26765,7909 +org.apache.solr.request.TestRemoteStreaming=892,893,819,1245,523,724 +org.apache.solr.request.TestWriterPerf=1503,507,1236,1449,429,516 +org.apache.solr.response.TestCSVResponseWriter=352,599,511,405,1580,405 +org.apache.solr.response.TestPHPSerializedResponseWriter=524,533,767,544,403,398 +org.apache.solr.schema.BadIndexSchemaTest=1999,1993,1930,2216,1798,613 +org.apache.solr.schema.CopyFieldTest=1023,291,814,704,428,351 +org.apache.solr.schema.CurrencyFieldTest=698 +org.apache.solr.schema.DateFieldTest=16,17,12,10,16,24 +org.apache.solr.schema.IndexSchemaRuntimeFieldTest=442,420,368,327,447,406 +org.apache.solr.schema.IndexSchemaTest=439,449,470,522,438,1266 +org.apache.solr.schema.MultiTermTest=460,184,365,495,301,333 +org.apache.solr.schema.NotRequiredUniqueKeyTest=123,121,118,121,141,245 +org.apache.solr.schema.NumericFieldsTest=257,307,264,244,445,282 +org.apache.solr.schema.OpenExchangeRatesOrgProviderTest=20 +org.apache.solr.schema.PolyFieldTest=619,822,894,731,1312,598 +org.apache.solr.schema.PrimitiveFieldTypeTest=564 +org.apache.solr.schema.RequiredFieldsTest=463,518,490,531,474,607 +org.apache.solr.schema.TestBinaryField=212,224,229,231,639,250 +org.apache.solr.schema.TestCollationField=9,296,454,380,647,494 +org.apache.solr.schema.TestICUCollationField=2491,2367,2309,232,2482,810 +org.apache.solr.schema.TestICUCollationFieldOptions=2065,1893,1916,1745,1983,1954 +org.apache.solr.schema.TestOmitPositions=554,582,629,823,691,914 +org.apache.solr.schema.UUIDFieldTest=253,254,266,286,28,6 +org.apache.solr.search.QueryParsingTest=378,345,418,480,323,460 +org.apache.solr.search.ReturnFieldsTest=360 +org.apache.solr.search.SpatialFilterTest=644,862,652,855,1565,2630 +org.apache.solr.search.TestDocSet=687,625,601,795,439,434 +org.apache.solr.search.TestExtendedDismaxParser=419,477,476,493,1882,3720 +org.apache.solr.search.TestFastLRUCache=43,55,27,57,91,95 +org.apache.solr.search.TestFiltering=1695,1762,2065,2350,2900,2690 +org.apache.solr.search.TestFoldingMultitermQuery=701,509,667,509,1387,784 +org.apache.solr.search.TestIndexSearcher=565,528,620,558,476,569 +org.apache.solr.search.TestLFUCache=300,332,292,358,1728,377 +org.apache.solr.search.TestLRUCache=6,6,6,6,6,7 +org.apache.solr.search.TestPseudoReturnFields=1054,1145,1323,1178,1298,878 +org.apache.solr.search.TestQueryTypes=558,723,610,554,796,469 +org.apache.solr.search.TestQueryUtils=486,434,408,539,2475,698 +org.apache.solr.search.TestRangeQuery=3052,3155,3319,3579,8876,4956 +org.apache.solr.search.TestRealTimeGet=38908,10904,37602,36269,168006,55307 +org.apache.solr.search.TestRecovery=13035,4203,10753,13229,5746,5212 +org.apache.solr.search.TestSearchPerf=205,330,243,197,234,255 +org.apache.solr.search.TestSolrQueryParser=444,349,322,549,330,721 +org.apache.solr.search.TestSort=2493,2740,2523,2678,2288,4565 +org.apache.solr.search.TestSurroundQueryParser=1635,430,1089,1417,603,406 +org.apache.solr.search.TestValueSourceCache=501,499,652,558,458,539 +org.apache.solr.search.function.SortByFunctionTest=953,813,978,866,3944,865 +org.apache.solr.search.function.TestFunctionQuery=2923,3714,2794,5056,1793,2148 +org.apache.solr.search.function.distance.DistanceFunctionTest=894,706,634,596,2374,1905 +org.apache.solr.search.similarities.TestBM25SimilarityFactory=115,131,107,107,103,104 
+org.apache.solr.search.similarities.TestDFRSimilarityFactory=67,76,112,96,101,191 +org.apache.solr.search.similarities.TestDefaultSimilarityFactory=187,75,170,158,78,130 +org.apache.solr.search.similarities.TestIBSimilarityFactory=234,68,122,177,66,85 +org.apache.solr.search.similarities.TestLMDirichletSimilarityFactory=1272,1300,1120,1175,79,134 +org.apache.solr.search.similarities.TestLMJelinekMercerSimilarityFactory=68,55,57,58,131,97 +org.apache.solr.search.similarities.TestPerFieldSimilarity=70,74,71,85,300,112 +org.apache.solr.servlet.CacheHeaderTest=682,714,660,824,2499,551 +org.apache.solr.servlet.DirectSolrConnectionTest=154,168,168,294,156,262 +org.apache.solr.servlet.NoCacheHeaderTest=775,770,915,713,738,548 +org.apache.solr.servlet.SolrRequestParserTest=1732,1613,1548,2117,1055,536 +org.apache.solr.spelling.DirectSolrSpellCheckerTest=1679,488,1159,1079,494,564 +org.apache.solr.spelling.FileBasedSpellCheckerTest=468,672,574,488,2696,506 +org.apache.solr.spelling.IndexBasedSpellCheckerTest=1367,1364,1401,1352,3147,666 +org.apache.solr.spelling.SpellCheckCollatorTest=889,708,912,964,966,565 +org.apache.solr.spelling.SpellPossibilityIteratorTest=38,37,33,34,50,49 +org.apache.solr.spelling.SpellingQueryConverterTest=6,6,7,9,4,24 +org.apache.solr.spelling.TestSuggestSpellingConverter=7 +org.apache.solr.spelling.suggest.SuggesterFSTTest=2825,2835,2856,2738,1065,743 +org.apache.solr.spelling.suggest.SuggesterTSTTest=632,855,796,879,1241,2627 +org.apache.solr.spelling.suggest.SuggesterTest=1980,605,1516,1304,3122,680 +org.apache.solr.spelling.suggest.SuggesterWFSTTest=571,750 +org.apache.solr.spelling.suggest.TestPhraseSuggestions=103 +org.apache.solr.uima.analysis.UIMAAnnotationsTokenizerFactoryTest=5499 +org.apache.solr.uima.analysis.UIMATypeAwareAnnotationsTokenizerFactoryTest=5442 +org.apache.solr.uima.processor.UIMAUpdateRequestProcessorTest=3940,3844,4066,3891,3873,5537 +org.apache.solr.update.AutoCommitTest=9271,9145,9368,9550,8352,9675 +org.apache.solr.update.DirectUpdateHandlerOptimizeTest=500,411,403,723,478,511 +org.apache.solr.update.DirectUpdateHandlerTest=3903,2586,3609,5868,1651,1709 +org.apache.solr.update.DocumentBuilderTest=966,621,903,1161,415,495 +org.apache.solr.update.PeerSyncTest=3435,3504,3263,3222,3232,2744 +org.apache.solr.update.SoftAutoCommitTest=10112,9973,10083,9950,11332,10454 +org.apache.solr.update.SolrCmdDistributorTest=1604,1613,1471,1342,1055,1004 +org.apache.solr.update.SolrIndexConfigTest=631 +org.apache.solr.update.TestIndexingPerformance=1260,530,1222,1755,899,665 +org.apache.solr.update.UpdateParamsTest=332,354,372,530,323,551 +org.apache.solr.update.processor.FieldMutatingUpdateProcessorTest=608,722,527,650,572,662 +org.apache.solr.update.processor.LangDetectLanguageIdentifierUpdateProcessorFactoryTest=2033,2090,2026,2078,2103,1977 +org.apache.solr.update.processor.SignatureUpdateProcessorFactoryTest=1282,1245,1174,1191,2107,1773 +org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactoryTest=2267,2271,2217,2231,2222,2102 +org.apache.solr.update.processor.URLClassifyProcessorTest=49 +org.apache.solr.update.processor.UniqFieldsUpdateProcessorFactoryTest=940,365,1044,1078,346,391 +org.apache.solr.update.processor.UpdateRequestProcessorFactoryTest=378,332,375,381,1408,386 +org.apache.solr.util.DOMUtilTest=10 +org.apache.solr.util.DateMathParserTest=38,37,42,40,36,137 +org.apache.solr.util.FileUtilsTest=3 +org.apache.solr.util.PrimUtilsTest=27,29,24,27,14,54 +org.apache.solr.util.SolrPluginUtilsTest=517,533,500,522,346,554 
+org.apache.solr.util.TestNumberUtils=210,250,290,354,176,210 +org.apache.solr.util.TestSystemIdResolver=13 +org.apache.solr.util.TestUtils=7,8,8,9,5,37 +org.apache.solr.velocity.VelocityResponseWriterTest=975,972,968,978,965,1273 +org.egothor.stemmer.TestCompile=239,269,209,186,266,241 +org.egothor.stemmer.TestStemmer=199,210,224,227,168,279 diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java index 66fe05b0f7d..e33aca65d77 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java @@ -96,6 +96,6 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new ArabicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new ArabicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java index 9296e317389..85df41d2dc8 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java @@ -76,6 +76,6 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new BulgarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new BulgarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java index 427015fd504..4bccc1a3466 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java @@ -162,7 +162,7 @@ public class TestBrazilianStemmer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new BrazilianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new BrazilianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java index 2680819a9e4..eb29296c7e3 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java @@ -58,6 +58,6 @@ public class TestCatalanAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new CatalanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new 
CatalanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java index ccbcbb35508..511b1f4bf36 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java @@ -509,16 +509,16 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testRandom() throws Exception { int numRounds = RANDOM_MULTIPLIER * 10000; - checkRandomData(random, newTestAnalyzer(), numRounds); + checkRandomData(random(), newTestAnalyzer(), numRounds); } public void testRandomHugeStrings() throws Exception { int numRounds = RANDOM_MULTIPLIER * 200; - checkRandomData(random, newTestAnalyzer(), numRounds, 8192); + checkRandomData(random(), newTestAnalyzer(), numRounds, 8192); } public void testCloseBR() throws Exception { - checkAnalysisConsistency(random, newTestAnalyzer(), random.nextBoolean(), " Secretary)
[[M"); + checkAnalysisConsistency(random(), newTestAnalyzer(), random().nextBoolean(), " Secretary)
[[M"); } public void testServerSideIncludes() throws Exception { @@ -787,8 +787,8 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testRandomBrokenHTML() throws Exception { int maxNumElements = 10000; - String text = _TestUtil.randomHtmlishString(random, maxNumElements); - checkAnalysisConsistency(random, newTestAnalyzer(), random.nextBoolean(), text); + String text = _TestUtil.randomHtmlishString(random(), maxNumElements); + checkAnalysisConsistency(random(), newTestAnalyzer(), random().nextBoolean(), text); } public void testRandomText() throws Exception { @@ -797,11 +797,11 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { int maxNumWords = 10000; int minWordLength = 3; int maxWordLength = 20; - int numWords = _TestUtil.nextInt(random, minNumWords, maxNumWords); - switch (_TestUtil.nextInt(random, 0, 4)) { + int numWords = _TestUtil.nextInt(random(), minNumWords, maxNumWords); + switch (_TestUtil.nextInt(random(), 0, 4)) { case 0: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomUnicodeString(random, maxWordLength)); + text.append(_TestUtil.randomUnicodeString(random(), maxWordLength)); text.append(' '); } break; @@ -809,14 +809,14 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { case 1: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { text.append(_TestUtil.randomRealisticUnicodeString - (random, minWordLength, maxWordLength)); + (random(), minWordLength, maxWordLength)); text.append(' '); } break; } default: { // ASCII 50% of the time for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomSimpleString(random)); + text.append(_TestUtil.randomSimpleString(random())); text.append(' '); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java index a29674c1449..61c24ed4cbe 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java @@ -20,6 +20,7 @@ package org.apache.lucene.analysis.charfilter; import java.io.Reader; import java.io.StringReader; import java.util.HashSet; +import java.util.Random; import java.util.Set; import org.apache.lucene.analysis.Analyzer; @@ -191,7 +192,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { }; int numRounds = RANDOM_MULTIPLIER * 10000; - checkRandomData(random, analyzer, numRounds); + checkRandomData(random(), analyzer, numRounds); } //@Ignore("wrong finalOffset: https://issues.apache.org/jira/browse/LUCENE-3971") @@ -215,7 +216,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { }; String text = "gzw f quaxot"; - checkAnalysisConsistency(random, analyzer, false, text); + checkAnalysisConsistency(random(), analyzer, false, text); } //@Ignore("wrong finalOffset: https://issues.apache.org/jira/browse/LUCENE-3971") @@ -235,11 +236,12 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { } }; int numRounds = RANDOM_MULTIPLIER * 100; - checkRandomData(random, analyzer, numRounds); + checkRandomData(random(), analyzer, numRounds); } } private NormalizeCharMap randomMap() { + Random random = random(); NormalizeCharMap map = new NormalizeCharMap(); // we can't add duplicate keys, or NormalizeCharMap gets angry Set keys = new 
HashSet(); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java index 26803de878a..ec75c4f465d 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java @@ -19,6 +19,7 @@ package org.apache.lucene.analysis.cjk; import java.io.IOException; import java.io.Reader; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -270,11 +271,12 @@ public class TestCJKAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new CJKAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new CJKAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } /** blast some random strings through the analyzer */ public void testRandomHugeStrings() throws Exception { + Random random = random(); checkRandomData(random, new CJKAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192); } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java index b3910882745..6a0f0ab024d 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java @@ -276,6 +276,6 @@ public class TestCJKTokenizer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new CJKAnalyzer(Version.LUCENE_30), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new CJKAnalyzer(Version.LUCENE_30), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java index c3d54e381b5..d27c3c9f95b 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java @@ -63,7 +63,7 @@ public class TestCJKWidthFilter extends BaseTokenStreamTestCase { } public void testRandomData() throws IOException { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java index 29d1aeb7532..f0a32c0223a 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java @@ -120,7 +120,7 @@ public class TestChineseTokenizer extends BaseTokenStreamTestCase /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new ChineseAnalyzer(), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new ChineseAnalyzer(), 10000*RANDOM_MULTIPLIER); } } diff --git 
a/modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java index 56ff278f7b5..1010bb816e4 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java @@ -319,7 +319,7 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase { } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); Analyzer b = new Analyzer() { @@ -331,6 +331,6 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase { } }; - checkRandomData(random, b, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), b, 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java index 58dfe708625..c926b9d5dd7 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java @@ -347,7 +347,7 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT, tokenizer, dict)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); InputSource is = new InputSource(getClass().getResource("da_UTF8.xml").toExternalForm()); final HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is); @@ -360,7 +360,7 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, filter); } }; - checkRandomData(random, b, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), b, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws Exception { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java index 842701bd754..39226b5d8db 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java @@ -20,6 +20,7 @@ package org.apache.lucene.analysis.core; import java.io.IOException; import java.io.Reader; import java.io.StringReader; +import java.util.Random; import org.apache.lucene.analysis.*; import org.apache.lucene.analysis.standard.StandardTokenizer; @@ -210,13 +211,14 @@ public class TestAnalyzers extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); - checkRandomData(random, new SimpleAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); - checkRandomData(random, new StopAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new SimpleAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + 
checkRandomData(random(), new StopAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } /** blast some random large strings through the analyzer */ public void testRandomHugeStrings() throws Exception { + Random random = random(); checkRandomData(random, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192); checkRandomData(random, new SimpleAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192); checkRandomData(random, new StopAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java index 80f114f65ff..3ab1f375f63 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java @@ -19,6 +19,7 @@ import org.apache.lucene.util.Version; import java.io.IOException; import java.util.Arrays; +import java.util.Random; /** @@ -314,11 +315,12 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new ClassicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new ClassicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } /** blast some random large strings through the analyzer */ public void testRandomHugeStrings() throws Exception { + Random random = random(); checkRandomData(random, new ClassicAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java index 561f075b6c3..a17d9840c9c 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java @@ -19,6 +19,7 @@ package org.apache.lucene.analysis.core; import java.io.Reader; import java.io.StringReader; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; @@ -64,6 +65,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } public void testLetterAscii() throws Exception { + Random random = random(); Analyzer left = new MockAnalyzer(random, jvmLetter, false); Analyzer right = new Analyzer() { @Override @@ -81,6 +83,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { // not so useful since its all one token?! public void testLetterAsciiHuge() throws Exception { + Random random = random(); int maxLength = 8192; // CharTokenizer.IO_BUFFER_SIZE*2 MockAnalyzer left = new MockAnalyzer(random, jvmLetter, false); left.setMaxTokenLength(255); // match CharTokenizer's max token length @@ -100,6 +103,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } public void testLetterHtmlish() throws Exception { + Random random = random(); Analyzer left = new MockAnalyzer(random, jvmLetter, false); Analyzer right = new Analyzer() { @Override @@ -116,6 +120,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } public void testLetterHtmlishHuge() throws Exception { + Random random = random(); int maxLength = 2048; // this is number of elements, not chars! 
MockAnalyzer left = new MockAnalyzer(random, jvmLetter, false); left.setMaxTokenLength(255); // match CharTokenizer's max token length @@ -135,7 +140,8 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } public void testLetterUnicode() throws Exception { - Analyzer left = new MockAnalyzer(random, jvmLetter, false); + Random random = random(); + Analyzer left = new MockAnalyzer(random(), jvmLetter, false); Analyzer right = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { @@ -151,6 +157,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } public void testLetterUnicodeHuge() throws Exception { + Random random = random(); int maxLength = 8192; // CharTokenizer.IO_BUFFER_SIZE*2 MockAnalyzer left = new MockAnalyzer(random, jvmLetter, false); left.setMaxTokenLength(255); // match CharTokenizer's max token length @@ -198,6 +205,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { // TODO: maybe push this out to _TestUtil or LuceneTestCase and always use it instead? private static Reader newStringReader(String s) { + Random random = random(); Reader r = new StringReader(s); if (random.nextBoolean()) { r = new MockReaderWrapper(random, r); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java index faf2279a3b6..0d1b36ac09e 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java @@ -96,7 +96,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { writer.close(); IndexReader reader = IndexReader.open(dir); - DocsEnum td = _TestUtil.docs(random, + DocsEnum td = _TestUtil.docs(random(), reader, "partnum", new BytesRef("Q36"), @@ -104,7 +104,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { null, false); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); - td = _TestUtil.docs(random, + td = _TestUtil.docs(random(), reader, "partnum", new BytesRef("Q37"), @@ -126,6 +126,6 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new KeywordAnalyzer(), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new KeywordAnalyzer(), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java index eeeaf0e801d..89fa3fc7cc9 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java @@ -810,6 +810,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { public void testRandomChains() throws Throwable { int numIterations = atLeast(20); + Random random = random(); for (int i = 0; i < numIterations; i++) { MockRandomAnalyzer a = new MockRandomAnalyzer(random.nextLong()); if (VERBOSE) { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java index cbb3ef0dcf1..3fe6e6f19e0 100644 --- 
a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.Reader; import java.io.StringReader; import java.util.Arrays; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -242,22 +243,24 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new StandardAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new StandardAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } /** blast some random large strings through the analyzer */ public void testRandomHugeStrings() throws Exception { + Random random = random(); checkRandomData(random, new StandardAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192); } // Adds random graph after: public void testRandomHugeStringsGraphAfter() throws Exception { + Random random = random(); checkRandomData(random, new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, reader); - TokenStream tokenStream = new MockGraphTokenFilter(random, tokenizer); + TokenStream tokenStream = new MockGraphTokenFilter(random(), tokenizer); return new TokenStreamComponents(tokenizer, tokenStream); } }, diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java index b07da800ad0..0b6ec54ecb6 100755 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java @@ -262,6 +262,6 @@ public class TestUAX29URLEmailAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new UAX29URLEmailAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new UAX29URLEmailAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java index 62677ea8d48..3da04e1c3b4 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java @@ -18,6 +18,7 @@ import java.io.StringReader; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Random; /** * Licensed to the Apache Software Foundation (ASF) under one or more @@ -472,11 +473,12 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } /** blast some random large strings through the analyzer */ public void 
testRandomHugeStrings() throws Exception { + Random random = random(); checkRandomData(random, a, 200*RANDOM_MULTIPLIER, 8192); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java index d728bc852f4..a81447c02cc 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java @@ -70,6 +70,6 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new CzechAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new CzechAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java index d39409a0ba7..606b95b849f 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java @@ -51,6 +51,6 @@ public class TestDanishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new DanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new DanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java index 0565e7ba501..86c7de425c9 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java @@ -66,6 +66,6 @@ public class TestGermanAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new GermanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new GermanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java index c5a5fb991af..defed5bb999 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java @@ -48,7 +48,7 @@ public class TestGermanLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java index 
5bb55bfe76f..794455d69f4 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java @@ -60,7 +60,7 @@ public class TestGermanMinimalStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java index 93c7ca59824..6d31653fee9 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java @@ -64,7 +64,7 @@ public class TestGermanNormalizationFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java index c9b3590728c..f05b1ebb9dc 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java @@ -60,7 +60,7 @@ public class TestGermanStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java index 88e924e9143..37515b1ba80 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java @@ -90,6 +90,6 @@ public class GreekAnalyzerTest extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new GreekAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new GreekAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java index baf3ba7e10b..e678b454a30 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java @@ -55,6 +55,6 @@ public class TestEnglishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings 
through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new EnglishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new EnglishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java index d40db25306c..f14179ad31c 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java @@ -54,7 +54,7 @@ public class TestEnglishMinimalStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java index 806952a3c1e..292f3ea0d24 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java @@ -42,7 +42,7 @@ public class TestKStemmer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } /** diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java index 5c3792072f6..3dc2a913849 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java @@ -63,7 +63,7 @@ public class TestPorterStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java index e4fd9f63a82..b1b8fe4004d 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java @@ -51,6 +51,6 @@ public class TestSpanishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new SpanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new SpanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java 
b/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java index 147f7bb9e02..685cefd2f5d 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java @@ -48,7 +48,7 @@ public class TestSpanishLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java index 8461d545665..7c67194d569 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java @@ -51,6 +51,6 @@ public class TestBasqueAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new BasqueAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new BasqueAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java index 9e4022ad6f1..5ec5c8b17db 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java @@ -224,6 +224,6 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new PersianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new PersianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java index 0c24d8be361..f11bfd70ebd 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java @@ -51,6 +51,6 @@ public class TestFinnishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new FinnishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new FinnishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java index 85b15beaab7..c7c08401ed0 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java +++ 
b/modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java @@ -48,7 +48,7 @@ public class TestFinnishLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java index 0d80d6adf76..48b031d8e12 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java @@ -263,7 +263,7 @@ public class TestFrenchAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new FrenchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new FrenchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } /** test accent-insensitive */ diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java index 75ec0765d12..6012e8282a9 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java @@ -162,7 +162,7 @@ public class TestFrenchLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java index 3ea0813d166..12f99987421 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java @@ -62,7 +62,7 @@ public class TestFrenchMinimalStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ga/TestIrishAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ga/TestIrishAnalyzer.java index f7de2db4a98..2f3538e5572 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ga/TestIrishAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ga/TestIrishAnalyzer.java @@ -66,6 +66,6 @@ public class TestIrishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - 
checkRandomData(random, new IrishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new IrishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java index 8f9ea8e4da1..412e0bda998 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java @@ -51,6 +51,6 @@ public class TestGalicianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new GalicianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new GalicianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java index 7dd2d5b7f36..59e1b5d1732 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java @@ -52,7 +52,7 @@ public class TestGalicianMinimalStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java index 393226db6b4..6bded3cd667 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java @@ -47,6 +47,6 @@ public class TestHindiAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new HindiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new HindiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java index 5f7e22376e5..43079aa7528 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java @@ -51,6 +51,6 @@ public class TestHungarianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new HungarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new HungarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java 
b/modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java index 70f2be08342..beb2e6d7580 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java @@ -72,7 +72,7 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY)); } }; - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java index 7bb72c701b3..929fa022381 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java @@ -51,6 +51,6 @@ public class TestArmenianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new ArmenianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new ArmenianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java index 0967ed6c600..b1176e65cd9 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java @@ -51,6 +51,6 @@ public class TestIndonesianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new IndonesianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new IndonesianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java index 079ce8f0883..63665771f97 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java @@ -54,7 +54,7 @@ public class TestItalianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new ItalianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new ItalianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } /** test that the elisionfilter is working */ diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java index 44ee495030a..f9e293aba4e 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java +++ 
b/modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java @@ -48,7 +48,7 @@ public class TestItalianLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java index 2f7ff13946b..e20611140bf 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java @@ -51,6 +51,6 @@ public class TestLatvianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new LatvianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new LatvianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java index fbef3205df3..64560d0bfe7 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java @@ -152,7 +152,7 @@ public class PatternAnalyzerTest extends BaseTokenStreamTestCase { try { Thread.getDefaultUncaughtExceptionHandler(); - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } catch (ArrayIndexOutOfBoundsException ex) { assumeTrue("not failing due to jre bug ", !isJREBug7104012(ex)); throw ex; // otherwise rethrow diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java index 322704056a0..2c358905cdf 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java @@ -1923,7 +1923,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new ASCIIFoldingFilter(tokenizer)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java index b6edc6aeb05..e33c3db650a 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java @@ -132,7 +132,7 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase { } }; - checkRandomData(random, a, 
10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java index e1347803d23..7d1450dbf9b 100755 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java @@ -74,7 +74,7 @@ public class TestHyphenatedWordsFilter extends BaseTokenStreamTestCase { } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java index 667617abe29..f90467af02d 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java @@ -77,6 +77,6 @@ public class TestKeepWordFilter extends BaseTokenStreamTestCase { } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java index 61f6b7afd04..8a561b2230c 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java @@ -50,7 +50,7 @@ public class TestLimitTokenCountAnalyzer extends BaseTokenStreamTestCase { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new LimitTokenCountAnalyzer(new MockAnalyzer(random), 100000))); + TEST_VERSION_CURRENT, new LimitTokenCountAnalyzer(new MockAnalyzer(random()), 100000))); Document doc = new Document(); StringBuilder b = new StringBuilder(); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java index 96df848a6d4..ed10cefef4e 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java @@ -129,7 +129,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase { // some helper methods for the below test with synonyms private String randomNonEmptyString() { while(true) { - final String s = _TestUtil.randomUnicodeString(random).trim(); + final String s = _TestUtil.randomUnicodeString(random()).trim(); if (s.length() != 0 && s.indexOf('\u0000') == -1) { return s; } @@ -146,13 +146,13 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase { public void testRandomStrings() throws Exception { final int 
numIters = atLeast(10); for (int i = 0; i < numIters; i++) { - SynonymMap.Builder b = new SynonymMap.Builder(random.nextBoolean()); + SynonymMap.Builder b = new SynonymMap.Builder(random().nextBoolean()); final int numEntries = atLeast(10); for (int j = 0; j < numEntries; j++) { - add(b, randomNonEmptyString(), randomNonEmptyString(), random.nextBoolean()); + add(b, randomNonEmptyString(), randomNonEmptyString(), random().nextBoolean()); } final SynonymMap map = b.build(); - final boolean ignoreCase = random.nextBoolean(); + final boolean ignoreCase = random().nextBoolean(); final Analyzer analyzer = new Analyzer() { @Override @@ -163,7 +163,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase { } }; - checkRandomData(random, analyzer, 1000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java index e3e8813601e..2cecfc54e0b 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java @@ -123,7 +123,7 @@ public class TestTrimFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new TrimFilter(tokenizer, false)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); Analyzer b = new Analyzer() { @@ -133,7 +133,7 @@ public class TestTrimFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new TrimFilter(tokenizer, true)); } }; - checkRandomData(random, b, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), b, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { @@ -141,7 +141,7 @@ public class TestTrimFilter extends BaseTokenStreamTestCase { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new KeywordTokenizer(reader); - return new TokenStreamComponents(tokenizer, new TrimFilter(tokenizer, random.nextBoolean())); + return new TokenStreamComponents(tokenizer, new TrimFilter(tokenizer, random().nextBoolean())); } }; checkOneTermReuse(a, "", ""); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java index 54e68ab77e8..1e98c3b2d36 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java @@ -31,8 +31,7 @@ import org.junit.Test; import java.io.IOException; import java.io.Reader; import java.io.StringReader; -import java.util.Arrays; -import java.util.HashSet; +import java.util.*; import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.*; import static org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE; @@ -336,9 +335,9 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase { public void testRandomStrings() throws Exception { int numIterations = atLeast(5); for (int i = 0; i < numIterations; i++) { - final int flags = random.nextInt(512); + final int flags = random().nextInt(512); 
+ final int flags = random().nextInt(512);
final CharArraySet protectedWords; - if (random.nextBoolean()) { + if (random().nextBoolean()) { protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<String>(Arrays.asList("a", "b", "cd")), false); } else { protectedWords = null; @@ -352,11 +351,12 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, protectedWords)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER, 20, false, false); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER, 20, false, false); } } public void testEmptyTerm() throws IOException { + Random random = random(); for (int i = 0; i < 512; i++) { final int flags = i; final CharArraySet protectedWords; diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java index adb887059fc..450727a341f 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java @@ -29,6 +29,7 @@ import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; import java.io.Reader; import java.io.StringReader; +import java.util.Random; /** * Tests {@link EdgeNGramTokenFilter} for correctness. @@ -149,7 +150,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase { new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 15)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); Analyzer b = new Analyzer() { @Override @@ -159,10 +160,11 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase { new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 15)); } }; - checkRandomData(random, b, 10000*RANDOM_MULTIPLIER, 20, false, false); + checkRandomData(random(), b, 10000*RANDOM_MULTIPLIER, 20, false, false); } public void testEmptyTerm() throws Exception { + Random random = random(); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java index 158c603a91c..09a4a287de8 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java @@ -109,8 +109,8 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, tokenizer); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER, 20, false, false); - checkRandomData(random, a, 200*RANDOM_MULTIPLIER, 8192, false, false); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER, 20, false, false); + checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 8192, false, false); Analyzer b = new Analyzer() { @Override @@ -119,7 +119,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, tokenizer); } }; - checkRandomData(random, b, 10000*RANDOM_MULTIPLIER, 20, false, false); - checkRandomData(random, b, 200*RANDOM_MULTIPLIER, 8192, false, false); + checkRandomData(random(), b,
10000*RANDOM_MULTIPLIER, 20, false, false); + checkRandomData(random(), b, 200*RANDOM_MULTIPLIER, 8192, false, false); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java index f5f3071e43f..8acc3ff4528 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java @@ -29,6 +29,7 @@ import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; import java.io.Reader; import java.io.StringReader; +import java.util.Random; /** * Tests {@link NGramTokenFilter} for correctness. @@ -131,10 +132,11 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase { new NGramTokenFilter(tokenizer, 2, 15)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER, 20, false, false); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER, 20, false, false); } public void testEmptyTerm() throws Exception { + Random random = random(); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java index 86a97828e6c..0d0ac484602 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java @@ -102,7 +102,7 @@ public class NGramTokenizerTest extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, tokenizer); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER, 20, false, false); - checkRandomData(random, a, 200*RANDOM_MULTIPLIER, 8192, false, false); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER, 20, false, false); + checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 8192, false, false); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java index cd91cbc3e35..dacc3067bf6 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java @@ -194,7 +194,7 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new DutchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new DutchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } \ No newline at end of file diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java index acf6e0b9691..cf61dce59eb 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java @@ -51,6 +51,6 @@ public class TestNorwegianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws 
Exception { - checkRandomData(random, new NorwegianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new NorwegianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java index 8dcca3502e3..0ace6b4db3a 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java @@ -20,6 +20,7 @@ package org.apache.lucene.analysis.no; import java.io.FileInputStream; import java.io.IOException; import java.io.Reader; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -49,6 +50,7 @@ public class TestNorwegianLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { + Random random = random(); checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java index 0c137a52bcd..40a8390204e 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java @@ -20,6 +20,7 @@ package org.apache.lucene.analysis.no; import java.io.FileInputStream; import java.io.IOException; import java.io.Reader; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -49,6 +50,7 @@ public class TestNorwegianMinimalStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { + Random random = random(); checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java index 48f7e4730bc..9c3ac239cc3 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java @@ -19,6 +19,7 @@ package org.apache.lucene.analysis.path; import java.io.Reader; import java.io.StringReader; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -206,11 +207,12 @@ public class TestPathHierarchyTokenizer extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, tokenizer); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } /** blast some random large strings through the analyzer */ public void testRandomHugeStrings() throws Exception { + Random random = random(); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { diff --git 
a/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java index d06c972e680..cedf0adf924 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java @@ -19,6 +19,7 @@ package org.apache.lucene.analysis.path; import java.io.Reader; import java.io.StringReader; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -168,11 +169,12 @@ public class TestReversePathHierarchyTokenizer extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, tokenizer); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } /** blast some random large strings through the analyzer */ public void testRandomHugeStrings() throws Exception { + Random random = random(); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java index e0fb7ec09bf..9341fafb052 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java @@ -20,6 +20,7 @@ package org.apache.lucene.analysis.pattern; import java.io.IOException; import java.io.Reader; import java.io.StringReader; +import java.util.Random; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; @@ -303,6 +304,7 @@ public class TestPatternReplaceCharFilter extends BaseTokenStreamTestCase { int numPatterns = atLeast(100); long start = System.currentTimeMillis(); long maxTime = 1000 * 2; + Random random = new Random(random().nextLong()); for (int i = 0; i < numPatterns && start + maxTime > System.currentTimeMillis(); i++) { final Pattern p = randomPattern(); final String replacement = _TestUtil.randomSimpleString(random); @@ -324,10 +326,10 @@ public class TestPatternReplaceCharFilter extends BaseTokenStreamTestCase { } } - public static Pattern randomPattern() { + public Pattern randomPattern() { while (true) { try { - return Pattern.compile(_TestUtil.randomRegexpishString(random)); + return Pattern.compile(_TestUtil.randomRegexpishString(random())); } catch (PatternSyntaxException ignored) { // if at first you don't succeed... 
} diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java index b2f1ee1f93b..a81fa8c78c5 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java @@ -93,7 +93,7 @@ public class TestPatternReplaceFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, filter); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); Analyzer b = new Analyzer() { @Override @@ -103,7 +103,7 @@ public class TestPatternReplaceFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, filter); } }; - checkRandomData(random, b, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), b, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java index 66c08815f16..87eb49b7acf 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java @@ -137,7 +137,7 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase return new TokenStreamComponents(tokenizer, tokenizer); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); Analyzer b = new Analyzer() { @Override @@ -151,6 +151,6 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase return new TokenStreamComponents(tokenizer, tokenizer); } }; - checkRandomData(random, b, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), b, 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java index 8c96b2bbc7c..c139e270f05 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java @@ -51,6 +51,6 @@ public class TestPortugueseAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new PortugueseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new PortugueseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java index 943472fbf63..5f41e08bb60 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java @@ -95,7 +95,7 @@ public class TestPortugueseLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception 
{ - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java index 2e0fef348f7..ce85f1960ee 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java @@ -69,7 +69,7 @@ public class TestPortugueseMinimalStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java index 4c55abbb906..a0a5d18158d 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java @@ -69,7 +69,7 @@ public class TestPortugueseStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java index 47b0f11cccc..b6551f62b2a 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java @@ -41,7 +41,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase { public void setUp() throws Exception { super.setUp(); dir = new RAMDirectory(); - appAnalyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + appAnalyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, appAnalyzer)); int numDocs = 200; for (int i = 0; i < numDocs; i++) { @@ -132,7 +132,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase { public void testTokenStream() throws Exception { QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer( TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), reader, 10); + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), reader, 10); TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring")); assertTokenStreamContents(ts, new String[] { "this" }); } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java index f7ee3ce60b2..29c8d533689 100644 
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java @@ -111,7 +111,7 @@ public class TestReverseStringFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new ReverseStringFilter(TEST_VERSION_CURRENT, tokenizer)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java index 10bfa438660..976c17568e8 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java @@ -51,6 +51,6 @@ public class TestRomanianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new RomanianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new RomanianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java index b0534e816c0..2beade8e945 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java @@ -67,6 +67,6 @@ public class TestRussianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new RussianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new RussianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java index 5e86fee53fa..da1807d68cf 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java @@ -48,7 +48,7 @@ public class TestRussianLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java index a3c4878a6a6..0e02076c203 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java @@ -55,7 +55,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase 
{ @Override public void setUp() throws Exception { super.setUp(); - analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 2); + analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), 2); directory = newDirectory(); IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); @@ -142,7 +142,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { } public void testReusableTokenStream() throws Exception { - Analyzer a = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 2); + Analyzer a = new ShingleAnalyzerWrapper(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), 2); assertAnalyzesToReuse(a, "please divide into shingles", new String[] { "please", "please divide", "divide", "divide into", "into", "into shingles", "shingles" }, new int[] { 0, 0, 7, 7, 14, 14, 19 }, @@ -157,7 +157,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testNonDefaultMinShingleSize() throws Exception { ShingleAnalyzerWrapper analyzer - = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 4); + = new ShingleAnalyzerWrapper(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), 3, 4); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please", "please divide this", "please divide this sentence", "divide", "divide this sentence", "divide this sentence into", @@ -170,7 +170,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 4, ShingleFilter.TOKEN_SEPARATOR, false, false); + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), 3, 4, ShingleFilter.TOKEN_SEPARATOR, false, false); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please divide this", "please divide this sentence", "divide this sentence", "divide this sentence into", @@ -183,7 +183,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testNonDefaultMinAndSameMaxShingleSize() throws Exception { ShingleAnalyzerWrapper analyzer - = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 3); + = new ShingleAnalyzerWrapper(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), 3, 3); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please", "please divide this", "divide", "divide this sentence", @@ -196,7 +196,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1, 0, 1, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 3, ShingleFilter.TOKEN_SEPARATOR, false, false); + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), 3, 3, ShingleFilter.TOKEN_SEPARATOR, false, false); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please divide this", "divide this sentence", @@ -209,7 +209,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testNoTokenSeparator() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + 
new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", true, false); @@ -223,7 +223,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", false, false); @@ -238,7 +238,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testNullTokenSeparator() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, null, true, false); @@ -252,7 +252,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", false, false); @@ -266,7 +266,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { } public void testAltTokenSeparator() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", true, false); @@ -280,7 +280,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", false, false); @@ -295,7 +295,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testOutputUnigramsIfNoShinglesSingleToken() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", false, true); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java index 1dadc3d54e5..b4658adf2dc 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java @@ -20,6 +20,7 @@ package org.apache.lucene.analysis.shingle; import java.io.IOException; import java.io.Reader; import java.io.StringReader; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -1143,11 +1144,12 @@ public class ShingleFilterTest extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new ShingleFilter(tokenizer)); } }; - checkRandomData(random, 
a, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } /** blast some random large strings through the analyzer */ public void testRandomHugeStrings() throws Exception { + Random random = random(); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java index 2d0b26d477c..b9598c25cce 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java @@ -86,7 +86,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { // with BaseTokenStreamTestCase now... public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception { Directory dir = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); TokenStream tokenStream = analyzer.tokenStream("field", new StringReader("abcd ")); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java index 15a18412b74..6dd5ad071bb 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java @@ -51,6 +51,6 @@ public class TestSwedishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new SwedishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), new SwedishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java index fb5d604324f..e1697fe18ad 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java @@ -48,7 +48,7 @@ public class TestSwedishLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java index f958f013686..6bfe9918448 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java @@ -43,7 +43,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase 
{ "foo => baz\n" + "this test, that testing"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random())); parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); @@ -76,7 +76,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { @Test(expected=ParseException.class) public void testInvalidDoubleMap() throws Exception { String testFile = "a => b => c"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random())); parser.add(new StringReader(testFile)); } @@ -84,7 +84,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { @Test(expected=ParseException.class) public void testInvalidAnalyzesToNothingOutput() throws Exception { String testFile = "a => 1"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random, MockTokenizer.SIMPLE, false)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random(), MockTokenizer.SIMPLE, false)); parser.add(new StringReader(testFile)); } @@ -92,7 +92,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { @Test(expected=ParseException.class) public void testInvalidAnalyzesToNothingInput() throws Exception { String testFile = "1 => a"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random, MockTokenizer.SIMPLE, false)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random(), MockTokenizer.SIMPLE, false)); parser.add(new StringReader(testFile)); } @@ -117,7 +117,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { String testFile = "a\\=>a => b\\=>b\n" + "a\\,a => b\\,b"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)); parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); Analyzer analyzer = new Analyzer() { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java index 1891ec829eb..28917a11903 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.Set; import org.apache.lucene.analysis.Analyzer; @@ -202,7 +203,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { assert alphabetSize <= 26; char[] s = new char[2*length]; for(int charIDX=0;charIDX synMap = new HashMap(); final List syns = new ArrayList(); - final boolean dedup = random.nextBoolean(); + final boolean dedup = random().nextBoolean(); if (VERBOSE) { System.out.println(" dedup=" + dedup); } b = new SynonymMap.Builder(dedup); for(int synIDX=0;synIDX(); synMap.put(synIn, s); - s.keepOrig = random.nextBoolean(); + s.keepOrig = random().nextBoolean(); } - final String synOut = getRandomString('0', 10, _TestUtil.nextInt(random, 1, 5)).trim(); + final String synOut 
= getRandomString('0', 10, _TestUtil.nextInt(random(), 1, 5)).trim();
         s.out.add(synOut);
         add(synIn, synOut, s.keepOrig);
         if (VERBOSE) {
@@ -415,7 +416,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
   private String randomNonEmptyString() {
     while(true) {
-      final String s = _TestUtil.randomUnicodeString(random).trim();
+      final String s = _TestUtil.randomUnicodeString(random()).trim();
       if (s.length() != 0 && s.indexOf('\u0000') == -1) {
         return s;
       }
@@ -428,13 +429,13 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
   public void testRandom2() throws Exception {
     final int numIters = atLeast(10);
     for (int i = 0; i < numIters; i++) {
-      b = new SynonymMap.Builder(random.nextBoolean());
+      b = new SynonymMap.Builder(random().nextBoolean());
       final int numEntries = atLeast(10);
       for (int j = 0; j < numEntries; j++) {
-        add(randomNonEmptyString(), randomNonEmptyString(), random.nextBoolean());
+        add(randomNonEmptyString(), randomNonEmptyString(), random().nextBoolean());
       }
       final SynonymMap map = b.build();
-      final boolean ignoreCase = random.nextBoolean();
+      final boolean ignoreCase = random().nextBoolean();
       final Analyzer analyzer = new Analyzer() {
         @Override
@@ -444,7 +445,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
         }
       };
-      checkRandomData(random, analyzer, 1000*RANDOM_MULTIPLIER);
+      checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
     }
   }
@@ -455,6 +456,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
   // Adds MockGraphTokenFilter before SynFilter:
   public void testRandom2GraphBefore() throws Exception {
     final int numIters = atLeast(10);
+    Random random = random();
     for (int i = 0; i < numIters; i++) {
       b = new SynonymMap.Builder(random.nextBoolean());
       final int numEntries = atLeast(10);
@@ -468,7 +470,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);
-          TokenStream graph = new MockGraphTokenFilter(random, tokenizer);
+          TokenStream graph = new MockGraphTokenFilter(random(), tokenizer);
           return new TokenStreamComponents(tokenizer, new SynonymFilter(graph, map, ignoreCase));
         }
       };
@@ -481,6 +483,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
   // Adds MockGraphTokenFilter after SynFilter:
   public void testRandom2GraphAfter() throws Exception {
     final int numIters = atLeast(10);
+    Random random = random();
     for (int i = 0; i < numIters; i++) {
       b = new SynonymMap.Builder(random.nextBoolean());
       final int numEntries = atLeast(10);
@@ -495,7 +498,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);
           TokenStream syns = new SynonymFilter(tokenizer, map, ignoreCase);
-          TokenStream graph = new MockGraphTokenFilter(random, syns);
+          TokenStream graph = new MockGraphTokenFilter(random(), syns);
           return new TokenStreamComponents(tokenizer, graph);
         }
       };
@@ -505,6 +508,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
   }
 
   public void testEmptyTerm() throws IOException {
+    Random random = random();
     final int numIters = atLeast(10);
     for (int i = 0; i < numIters; i++) {
       b = new SynonymMap.Builder(random.nextBoolean());
@@ -530,6 +534,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
   /** simple random test like testRandom2, but for large docs */
   public void testRandomHuge() throws Exception {
+    Random random = random();
     final int numIters = atLeast(10);
     for (int i = 0; i < numIters; i++) {
       b = new SynonymMap.Builder(random.nextBoolean());
@@ -558,7 +563,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
       "aaa => aaaa1 aaaa2 aaaa3\n" +
       "bbb => bbbb1 bbbb2\n";
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random));
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random()));
     parser.add(new StringReader(testFile));
     final SynonymMap map = parser.build();
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java
index ed3472ee521..d68494fb7f4 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java
@@ -41,7 +41,7 @@ public class TestWordnetSynonymParser extends BaseTokenStreamTestCase {
     "s(100000004,2,'king''s meany',n,1,1).\n";
 
   public void testSynonyms() throws Exception {
-    WordnetSynonymParser parser = new WordnetSynonymParser(true, true, new MockAnalyzer(random));
+    WordnetSynonymParser parser = new WordnetSynonymParser(true, true, new MockAnalyzer(random()));
     parser.add(new StringReader(synonymsFile));
     final SynonymMap map = parser.build();
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
index b50d74ae01d..68da30e55b3 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
@@ -20,6 +20,7 @@ package org.apache.lucene.analysis.th;
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -169,11 +170,12 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new ThaiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new ThaiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
+    Random random = random();
     checkRandomData(random, new ThaiAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192);
   }
 
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
index fce2e6f84e9..3a84c389b96 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
@@ -51,6 +51,6 @@ public class TestTurkishAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new TurkishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new TurkishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java
index 25c5d6f700c..341eba4c0b8 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java
@@ -35,7 +35,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
     BreakIterator bi = BreakIterator.getWordInstance();
     CharArrayIterator ci = CharArrayIterator.newWordInstance();
     for (int i = 0; i < 10000; i++) {
-      char text[] = _TestUtil.randomUnicodeString(random).toCharArray();
+      char text[] = _TestUtil.randomUnicodeString(random()).toCharArray();
       ci.setText(text, 0, text.length);
       consume(bi, ci);
     }
@@ -63,7 +63,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
     BreakIterator bi = BreakIterator.getSentenceInstance();
     CharArrayIterator ci = CharArrayIterator.newSentenceInstance();
     for (int i = 0; i < 10000; i++) {
-      char text[] = _TestUtil.randomUnicodeString(random).toCharArray();
+      char text[] = _TestUtil.randomUnicodeString(random()).toCharArray();
       ci.setText(text, 0, text.length);
       consume(bi, ci);
     }
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
index 9ff33aafa97..afd0ea261b4 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
@@ -30,15 +30,15 @@ public class TestCharArrayMap extends LuceneTestCase {
     char[] key;
     for (int i=0; iSMP and check that offsets are correct
@@ -159,7 +159,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     };
     int num = 10000 * RANDOM_MULTIPLIER;
     for (int i = 0; i < num; i++) {
-      String s = _TestUtil.randomUnicodeString(random);
+      String s = _TestUtil.randomUnicodeString(random());
       TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
       ts.reset();
       OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
@@ -174,6 +174,6 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
       ts.close();
     }
     // just for fun
-    checkRandomData(random, analyzer, num);
+    checkRandomData(random(), analyzer, num);
   }
 }
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java
index b9723ea05eb..ccebdd69440 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java
@@ -21,6 +21,7 @@ package org.apache.lucene.analysis.wikipedia;
 import java.io.Reader;
 import java.io.StringReader;
 import java.io.IOException;
+import java.util.Random;
 import java.util.Set;
 import java.util.HashSet;
@@ -183,11 +184,12 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER);
   }
 
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
+    Random random = random();
     Analyzer a = new Analyzer() {
       @Override
diff --git a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java
index 4b134427dfb..d52b7cb0b15 100644
--- a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java
+++ b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java
@@ -76,7 +76,7 @@ public class TestICUFoldingFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER);
   }
 
   public void testEmptyTerm() throws IOException {
diff --git a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java
index 05a5cd7724a..2b0302b96df 100644
--- a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java
+++ b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java
@@ -76,7 +76,7 @@ public class TestICUNormalizer2Filter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER);
   }
 
   public void testEmptyTerm() throws IOException {
diff --git a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java
index 68ddf94b7ca..c1eeb78b8ca 100644
--- a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java
+++ b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java
@@ -98,7 +98,7 @@ public class TestICUTransformFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, new ICUTransformFilter(tokenizer, transform));
       }
     };
-    checkRandomData(random, a, 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
   }
 
   public void testEmptyTerm() throws IOException {
diff --git a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
index 74a0856d5b3..b5ffeff5c64 100644
--- a/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
+++ b/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
@@ -27,6 +27,7 @@
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
 import java.util.Arrays;
+import java.util.Random;
 
 public class TestICUTokenizer extends BaseTokenStreamTestCase {
@@ -234,11 +235,12 @@ public class TestICUTokenizer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER);
   }
 
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
+    Random random = random();
     checkRandomData(random, a, 200*RANDOM_MULTIPLIER, 8192);
   }
 }
diff --git a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
index 3b02fc6e6d9..56184b0d2d7 100644
--- a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
+++ b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
@@ -20,6 +20,7 @@ package org.apache.lucene.analysis.ja;
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -50,7 +51,7 @@ public class TestExtendedMode extends BaseTokenStreamTestCase {
   public void testSurrogates2() throws IOException {
     int numIterations = atLeast(10000);
     for (int i = 0; i < numIterations; i++) {
-      String s = _TestUtil.randomUnicodeString(random, 100);
+      String s = _TestUtil.randomUnicodeString(random(), 100);
       TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
       CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
       ts.reset();
@@ -62,11 +63,13 @@ public class TestExtendedMode extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
+    Random random = random();
     checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
   }
 
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
+    Random random = random();
     checkRandomData(random, analyzer, 200*RANDOM_MULTIPLIER, 8192);
   }
 }
diff --git a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java
index 0e5fe6c0099..6ab2735f440 100644
--- a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java
+++ b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java
@@ -19,6 +19,7 @@ package org.apache.lucene.analysis.ja;
 
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -124,6 +125,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase {
    * blast random strings against the analyzer */
   public void testRandom() throws IOException {
+    Random random = random();
     final Analyzer a = new JapaneseAnalyzer(TEST_VERSION_CURRENT, null, Mode.SEARCH,
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
@@ -132,6 +134,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
+    Random random = random();
     final Analyzer a = new JapaneseAnalyzer(TEST_VERSION_CURRENT, null, Mode.SEARCH,
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
@@ -157,6 +160,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase {
   // LUCENE-3897: this string (found by running all jawiki
   // XML through JapaneseAnalyzer) caused AIOOBE
   public void testCuriousString() throws Exception {
+    Random random = random();
     final String s = "<li>06:26 2004年3月21日 [[利用者:Kzhr|Kzhr]] "お菓子な家族" を削除しました <em><nowiki>(即時削除: 悪戯。内容: 
&#39;KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK&#39;)</nowiki></em></li>"; final Analyzer a = new JapaneseAnalyzer(TEST_VERSION_CURRENT, null, Mode.SEARCH, JapaneseAnalyzer.getDefaultStopSet(), @@ -167,6 +171,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase { // LUCENE-3897: this string (found by // testHugeRandomStrings) tripped assert public void testAnotherCuriousString() throws Exception { + Random random = random(); final String s = "《〔〘〝」〩〄〯』〴〷〦〯〹】〰。 〆。〡〢〲〆〤〫〱 〜々〲〿〄》〃】〚〗〪〓〨々〮〹〟〯〫』」〨〒〜〃〃〡 〸〜〱〆〿「〱〳。〷〆〃〷〇〛〥〒〖〪〕〦〚〉〷〼〣〒。〕〣〻〒〻〼〔〸〿〖〖〆々〭《〟〚〇〕〸〲〄〿〙」〞〖〪〬〤【〵〘〃々〦〮〠〦〛〲〝〿〽〓〺〷〛》〛『》〇 〽〄〱〙〥〠』〨〉〨〔」》〮〥〽〔〰〄〶】〠〶〨〔々『。〞〙〮》【 〯〦〯〩〩〈〿〫〘〒》』〾〰〰〼〒「〝〰〱〞〹〔〪〭、〬〴【』〧〩】〈。〧〤〢〨〶〄〴〡。〪〭〞〷〣〘〳〄〬〙『 「」【〮〯〔〱〬〴〵〭〬〚〱、〚〣、〚〓〮、〚々】〼〿〦〫〛〲〆〕々。〨〩〇〫〵『『〣〮〜〫〃】〡〯』〆〫〺〻〬〺、〗】〓〕〶〇〞〬。」〃〮〇〞〷〰〲】〆〻。〬〻〄〜〃〲〺〧〘〇〈、〃〚〇〉「〬〣〨〮〆〴〻〒〖〄〒〳〗〶、〙「 〫〚《〩〆〱〡【〶』【〆〫】〢》〔。〵〴〽々〱〖〳〶〱《〈〒』『〝〘【〈〢〝〠〣「〤〆〢〈〚〕〿〣々〢〹〉〡 〷《〤〴『々〉〤〬《』々〾〔〚〆〔〴〪〩〸〦』〉〃 《〼〇〆〾〛〿」〧〝〽〘〠〻【〰〨〥《〯〝〩〩〱〇〳々〚〉〔『〹〳〳』〲『〣」〯〓【々〮〥〃〿〳〞〦〦〶〓〬〛〬〈〈〠『〜〥〒〯〜〜〹〲【〓〪《々〗〚〇〜〄〦『々〃〒〇〖〢〉〹〮〩〽『》〵〔】〣〮】〧、〇〰〒】《〈〆々〾〣【〾〲〘〧『〇〲〼〕〙「〪〆〚々〦〯〵〇〤〆〡」〪》〼』〴〶〪】『〲〢〭〬〈〠〮〽〓〔〧〖」〃〴〬』〣〝〯〣〴『〉〖〄〇〄〰〇〃〤、〤》〔〴〯〫〠〝〷〞〩〛〛〳々〓〟〜〛〜〃 〃〛「、』》》々〢〱〢〸〹〙〃〶〇〮〼」〔〶【〙〮々〣 〵〱〈〡〙〹、〶〘【〘〄〔『〸〵〫〱〈〙〜〸〩〗〷》〽〃〔〕〡〨〆〺〒〧〴〢〈〯〶〼〚〈〪〘〢〘〶〿〾〹〆〉」〠〴〭〉〡〮〫〸〸〦〟〣」〩〶』《〔〨〫〉〃〚〈〡〾〈〵【〼《〴〸〜〜〓《〡〶〫〉〫〼〱〿〢々〩〡〘〓〛〞〖々〢〩「々〦〣】〤〫〼〚〴〡〠〕〴〭。〟「〞》』「、〛〕〤々〈〺〃〸】〶〽〒〓〙》〶〬〸〧〜〲〬〰〪。〞〒【〭〇〢〝〧〰〹〾》〖「〹」〶〕〜〘〿〩〙〺〡〓〆〵〪〬〨〷〯〃】〤〤〞〸》〈〹〖〲〣〬〲〯〗〉〮「〼〨〓々。〭〆〶〩【〦〿》〩〻〢〔〤〟〯【〷〻〚〟」〗《〓〛。〰〃〭〯〘〣》〩〩〆」【〼〡】〳〿〫〳〼〺〶『〟〧』〳〲〔『〦「〳〃〫〷《〟〶〻〪〆〗〲〮〄〨〻』〟〜〓〣〴〓〉、〷〄〝〭〻〲〽〼〥〒〚〬〙〦〓〢〦〒〄。〛〩〿〹「〶〬〖〬〾〭〽〕〲〤〕〚〢〪〸〠〸〠〓〇〄〽〖】〵〮〦〲〸〉〫〢〹〼〗〱〮〢」〝〽〹「〭〥「〠〆〕〃〫々【『〣〝々〧〒〒】〬〖〘〗〰〭〢〠〨〖〶〒》〪〺〇〡》〦〝〾〴〸〓〛〟〞」〓〜。〡』々》〃〼』〨〾】〜〵々〥【〉〾〭〹〯〔〢〺〳〹〜〢〄〵〵〱。〯〹〺〣〭〉〛々〧〫々〛〪。〠〰〖〒〦〠〩〣〾〺〫〬、》「〚〫〲〸〶〧〞〯〨」】【〚〲『〽〡》〘〣〒〕〸『〼〘〿〘〽〤〿〶〫〆〾〔〃〱〫〱〧、 〒〰。〜〸〇〜〔〉〡〬〿〝〼〉〷、〠〘〉』〥〫〧〕》》〡〻〨〲〔〠〮】〰〮」〧〬《〦〼〽〵〭「〷〮〈〴〔〭、〣〔〥〱〔」〄〘》〡〣》〴〙〜〖〬〺〯々〟〗〥〥【〝〨〝〽〼〚。〙』〤〬〞〜〣〮〬〳〽〦〩 〶」〠〄〳〠〇〜〒〶〱々〠『〡』〭〰》〴〉〫〬〒《〽『〉〳〵〄〨〮〔〭〞』〡〚〩〦、〠【〓〯〬〦〛〽〉〜〻『〗〫〞〩〃〼〿〡〕〯 〸』》〼〮〆」〼〪〇〭〣〗〓〻〧」〙〳〱〥〳、〓〕〮〫》〧〃《〣』〹〬〣〶〡〾〙〮〕〶〧《 〨〇〺〳〉《《〕〜〰〱〕〛「〞〩 〓〢〄〣〼〢〽〇〛〟〖〘〳〤〫〡〫〬〦〘〪〶〝《』〜〕〝】〄〡〳〹々〯【〝〝〇〔〹〿〥〄〚〒〻『〺〮〇〲〒〾〙〞〉】〉〪〫〴〒〔〨〮〰〻〷〿〥〮〼〹〩〱〞〈〴〦〰〞【〻〾、〵〻〛〮、〻《〘〱〫〾〄〄〙、〔〖〘 》〻〧〦〃〣〬〰〗】〸〵。〄。〷〄〸〟〰〓、【〖〰〢〾〘〆》〜〶〻》〔〛』〦〩〷〴〃〴〫〱《「〖々〖〒〡〞。〱〡〖〤〫〇〜〒〴〯》〪〶》〘〨》》【〵〹」〤〯《〦〶〯〃〧〙〩」「〤】》々〣〱〯〞〰〢々〵〷〺〾〺〜〜〚〣〿〩〰《〄『〧《〜 〷〓〺〦々〚〨「〧〮《〥〸〞【〡〩〩〱〴〗〙〿『〇〭〖 〹〥〲『〗〛〯〷〃〽〝《〳《〡】〄々〱〆〯〦。〒『〡》〨〃〦」〬〄〬〔〭〫〼〲】』〗〔〼〴〹〠」〺〬〺〔々》〾〿「〺〖〤》〴〶〣〚〒【〤〄】〹〺〟〃〜、】〪〚〯〢〹、〶〖〭】〾〠」〉〆〾々〯〈〙〞〶〩】〺〟〫〽〫〸〵〛〙〃「〰〫〓』〻「〦〤〖〺〇。〨〟 〦〙〘〨〸〒〣〈〩〜〧〾〒〕〤 
〇〴〮〝〈〿〢〴〟〷〭〴】〽〇〟〦〬〶〲。〫〸〮〝〆〸〄〣〦〲〢〇〫〻〹〕〶〥〖。〨〬【〥〽〓〵〯、〒〉〳〘〧〼〆〹〉〾〬〽】〹〲《〜〨〟〡〪〱〃〓〬〜〧〝〸、〢〝〦』〝〸》】〩〡〉〫〛〇〢〖〔〠〹〧〕〨〃〙「〲〗〙『〛。。《〸〔〾〧〉〠』〡〼〄〨〲〥〼〠〻 「〸〩〟〷【〮〜〧〿〾〜〈。〣〰〪〘〮〴〨【〩〜〟〟〼〻〦〝「〺〝〄〵〝〲〃〨〺〫〜 〮」〡』〜〿、〪々〕〫〃〒〔〛〻〲〹》【〚〣〯《〢〙〕〝〾〙〭〄〕〗〄〪〵〃〘〺〻〤〟〢〻〆〥〝】〠〬〧〾〮々〪〓』〷』〿〕〒〽、〷〉」〨〨 〄〽〾「〧〴〜〢〮〚〆〣《〬〺〟〥〼〛〆〓〚々〇々〈〉〗〨〳々〣〭〯〉【〩〮〺〪』〭〚〉〦、〃〘〦〮」〴〆〴〔〴〜「〠〴【〰「〫〳〟〾〶〉〨〲〚〩〷〄《〄〝〈。〧〟〳〃〹々〃〄〭〬〰、〥〬〸〱〉〩〴《〔【〠〳〪〧〫〽〓〭】〧」〮〒〸〤。〩』〭〖〛〭〯〨〕〞〮〞〬〹〺々〽〡〷〪〶「〹〯〝々〭〠〼〰「〒〉」」〡〆〜〾〪〾》〇〙『〚〿〽】〛〮〶〚」《〔〔〣、〄〗〩〭〠」〠〰〞《〸〧〺〰」『〾〯〃〓〓〩〣〚【〜〭 〝〨〗〷〒《〫〝〶〘〣〿〜〱〾〨〥〘〃〳〆〇〈〜〲〪〡〶〭〤『〝〖〷〦〾〬〟〠〳〻、」【〣『〺〞〴〳「〵〺〨々〩〰〢〧〣〃『〹」〉〓〘〦〣〄〕〞〵〧〜」〴〠〱》〮〬〄〶〆〬」〘 〺。〲。 〾〷〕〛〣〾〗、〭』〭〧〝。〮」々『〻〒〣「〳〩〪〝〒〥〻〘〰〼〭〆〷〭「」〚〔〬〃〝〮〩〪〽〱。〯〯〰〨〿〷「々》【〴〧〻〰、〶〡〹〩〡〺〲〼。〩〿〯』〟〴〼〦〤〙〢〩〔〲〆〗〲《〟〤〬〷〧〫〧〗〞〣〚〚〧〭〮〛〲〮々〩〩〕〬々 〥〸、〢。〿〵〺〤〲〝 〥》々〰」〮〩〛〛』々『〹〞〃〃々〚【〱。。〹〨〿〻〣〞〨〈〤〼〃〻〩〶『 〲〷〗〭〓〯〯〝〃〾〕〻〖〱《「〹〣〦 」〵〄〮〚〖〞〪〼〖〙〵》〰〃〘。【〺〖〄〪〝〭〆〬〚〬〨〽」〕」「〜〤〯〷〇〝〠〆〫〼」〭〤〓〔々〆〵〷〪〭「〆〖〇〽〄〄「〿〵〷〤〿〮〫〻〢〕〝〪〳〸〘〡〡〞〮〻】「〝〷〘〾〒〺〉〨〰』〳〓〃〒〪【〗〯「〧々〷〩〝』〭〇〒、〯〈〦〣〆〬〸〚〈〉〔〥《々》〹〢〺〤〝々《 〲〘】〚』〚『〯〼〾〱〵〻、〪〟〸〯〽〴〱。〵〪〫々〳〢〣〕〓〩「〘〜〨〻】〿〹〭〛〛〔〹〻。〛〴〤〢〮、〸〷〃〜〜〝〔』〳〮〹』〽 〶〛 〤」〢。〣〖〶〯〥》〢〸〸〤〕〣〘》〧〦」〘〻〶〾〮〢〳〝〙〻〦〺〇〲〢〔〘〶〩〖】〟〓〰〇〮』〦〄々〹〻〄〄〽〷〱〫〒〛〉〿〓〯〺〪〲〢〼、〫〬「〩《〡〕〻』〭〜〗〫】 、〈〙〉【〓〣〫〜〈『〾】〴〪〫〬〶〪〚〬〿〪〮〴〒〶〡〄〉〿〼〜〵』〻〼〢「〵。』〸〖〙〧』〾〖〙《〉〪〦〙〔〈 〤〫〦〸『〗「〣『〓『〡〨〖〥〭《〢〠〦〞〸〞〚〢〕〙〖〾、〩」「〗〈〰〸〤〴〶〤〙《々〆〽〆【『〬〝〸『〙、〪〻』〓〹々〥〲〉〪〹〫〓〽〪〩〷、〹〺〩「〞『】〡々〡「〇〉〺〶〾〔々、〾〻〪〣〖〡〩〥〾〯】〤〰》『〲【〙〭〽〛〿々〟 〢〃〼〕〫〲』〪【〛〯】〔〕〥』」〳』《〖〥〳〄〢【〩〮〫〥〝〯〿〟、〣〹〪〔〱」〖〢〘〛〾〾〜〒〝〷〚〳〣〝〟《】「》〻『〢〄〄 》〱〓〞〛〢〆〺〉《〃〭〙〻〞〷〩〹〥〦〫〞〄〇〯〽〱〼〴〾〕〸〿〱〪〨〟〠々〪〸〔〵〆」〔〖〴〝〟】《〥 』《〒〄〣〿〞》】〃〹〲〛〬。】〒〓〹〴〿〥〴〲〖〧〝〪〶「〕〔〞〜〸〬〒〽【〸〻〢【〱」〪〉 〉〘〪〻〴〞』〯〰〾〥〓〼〻〕』〠〃〟〩〛〔【〻〡「〘〔 〲々〻〚〈〪〱〾〷〗》〯〞【〩 『〕〪〈々〞〞〳〘〵〃〼〨々〇〞〈〹〧〢〃〢〮〆〈〤〘〬〟〽〩、。〲々〺〠〳〸 〸〹〥、〯〒〈〃〠〰〙〪〯〬〖」〔〹〔〘〶〾〨〿〛〈〡〯〕〶〲、〷【》〷〆》〄《】〒〓〔〼〉〒〢〄〢〓〩〰〃〔。〵〙』。〷〼〩」〒〒〇〳〆〘〯「〢〠】〱〱《〤〽〢〄〤〵〪」〆〘〲〪〼〷〕〚〙〢〳〲〦〥〃〩〳〤「〽〽〇〖〶〶〾〴〰〷〨『〟〲〬〵〲〸〩〕〣〫】〝〇〡〿〳〦【〧〖〓〫〿〣〖【〙【〵々〶『〵〟〠〇》』〲〹〾〰〰〙〚〖〳〞〄『〤〠〇、。〆〧 〒〘〱〾〢〲〵〇〼〼〪〤〵〓〴〦〵〛『〘』〭〔〯「〓」〤〼〱〒〤〶〰〖〬〻【〳〵〡〃〙〠〩〛〝〰》〸《』〦 〿〭〵〺〈〓〵〛【〴〤〒。〪〷〢〡』〒 〄〚々〽〄〔〖々。〪〠〢〸〮〵〾「〉〙〆〘〣《〩〽〃〄「〕〢〻〉〷〛〫〇〪〯〵《〷〚〕〇〟〔〛『〣〆』〸〶々〳〾《〭〯〫〄〔〗〨〺〛〴》〻〫〨〢〜〱〇〦〘〺〉〫〇〧〿〶〲〉〖〵〦〹〷〳〈〞』』〡〓〺〟〡〭、〧〺〺〱〟」「〠〡「〠〬〰〙〹〥〙〓〶〫〳〣〢〳〇〫』々〡〚「〮〘〭〹〶〸〮【〔〚〆〆〼〷〖〒〤〲〕〳〴〾〇〔〹「〦〔〹々〘〲〔〃〡〪〚〪〗〉〓〫〦 」〟〳〛〉〹〺〭〲〆〙〽「〱〘〿〡〭〦】、〠〰〢〥《〶《』〶〃〼〄〪〥〙【。』 〸〳〈〇〡〩〮〃〹〘〧〿〱々〿〭》〶】〥〜、〬〖〠〢。〾〫〔〩〥〫〓」〲〢〛〶〚〡〈」〡〦〼〰〔〾〨〔〄〹〬〛〃〇〸。〽〠〵〙〠【〶〉〇〗〔〒〒〇〉〧《〗〮〟〡《〉〻〧〝〓〱〧〜〘〦【〸〘〩、〵〡〈〴〭『〉〕〴〯〰〘〳。〴〃〙〨〄〈〿〒〕〯」〼〳〤〱『〓〚〛〳〣〳〺〒、〃〚〲〲』〳〃〷〵〹〷〾〞〞〹〣〢〨〵》〽〮〒〹〻〨〜」〇〗〨〙〒〃〆〫〹〉〻。〄〔〧〝〒〷〛〲〧〪〺〚〼〳〒〙〫〢「〲】〾〬〸〷〿〉 〱〛〙〰〜〧》〳〉】】〮〈〗〢〧〟〠〣 〭〵〰「〼〽〭〫〘〴〲〺〾〘「〮〯〩〛〤〣〥〛】〱。〬〴〞〰〣〻〵〹〤〇〴〮〦》『〨〛『〡〞〥〄〠〸〽、』〣〬〢〠〯〰〄〇〆》〇〵『〹〛、〃〟〙〡〷〿〩〥〶〲、〓〧〲〪〚〕〞〢〗〖〝〰〵〪〴〿』〱〮〳〫】《〹〟〻〝〓〦〣〞〤〷〠 〃〈〛「〱〿〆〟〟〉〤〿〈〦〥〻《〻〼〇〢〰〢〒【〞〆「〢〻〧〇。〭々【〪『〪〓】〹〃〄〹〕〝〒〚》〔。〕〶〺《』〦〗〳〰〶〨〔々〖《〰〷〛〩〨』〤〻』《。〵〱〼〵〛〝〧〼〡〶〧〾〯〷〞 〧〛。〦〛〪〕〶〱〆〤〻〹〱〰〖〨〥〚々。〾〽〦〸】〛〇〫》〃々々〲《『『〱〘〲〕〦〇〱〈〞」】〞〨〖〚〽〧〥〬〰〬〥〇〡〼〴〲〠〭〖〵〯。〙〪〖〯〄〾〮〗『〉〴〩 〃〚〲〠〨〟』〖〜〥〛〉〲〃〃〮〳〡〳〩 〄々〞〨〛〪「〼〓〭 々〵〘〄〝〭〖〰〾〬〆〸。〻〓〞『〥〗〪〚〇〞〭〤〉〼〬〕【〤】〥〡〛〖〕〆〧〝〧〺《〭〈〸〪〆〺〸〝〭〇、〆〯〴〸〤、〾〒〉〰〛〷〽〶〿〰〫〜〔〪〱〇』』〰〨〞〓〽〻〻〙〪〠〨〗〓〣〨〾。〜〃〘〚〇〟〖〗【〥。〡「〾『〙〢〦〹〩〟〠〘】〾〒〈〔『〣〲〉〉〻『〇〦〽〿〼〾〚〮〧。〷〰〲〧《〹」〕々〻〤〗〦』《〳〢、」〤 〰〞〠〨〾〪〯〮〳〒 〰〜〼〕〰〳〄》〤「〗〽〇〠〔〝〚〽〣》〷〙】〶〷〆』〇》〓〄〤〸【〡】〾『〯〶、〵〨》〼〗〨〶〉〄〭〓〲〞〝〞〡〻〷〻〣〰〈〽〮》〲《。〸〶〿〣〞。】〡」〖〩〔〜〘》〤〦」〓『〨 〹〞『〛〡〧〬〃〷】〔〫〆〤〻〲〆〯〞〿〧〔『 。〓〳〝〢〿〮〯〵〮〨》〴〒」〒〷〻〶〡〽〤〭〽〰》〾〹。〳〔〹》〴〕〫」〹〜〻〦〳〕〺〘〴』〈〽〲〃〔〙。【」〇〨》〨〴〿〄〻」〉》「〚〺〿〹〤』〄〸】〴〩々【「〫〒】〄〛 【〰〯〶〰〉【〮」〦「〣》〴〙〿〽〄〔〈〓〻〠」〚〯 〷〄〆〳《〸 〴〕〩〸〾〡〼〻〆〬〶〞〓〤〩〿〪〻、〠「〲〓〠〦〛〢〓〇〸〡〬〱】〞〫〽〖〉、〻〿〈〸〓〹〯〰〸〰〘〫 〬〬〽〦〣〾々〥《〰〗〩〰〞】〪〆〷〳〚《〯〱〓〣〭〗。〬「〢〸〮〤〓〖〾〣』〘〳〕【〼〤〔」〵〰〪〡〲。〤〃』〧〙【。〝「〶〻〝〖〢〡〿〓〖〺〝〈々】〈、『〼〣》〔〪《〢〣、〛〕〙〞〭〿〧〵」〴〾〯〫「〨〕〨〄〷』〵《〶〼〘〗】々〖 〳〶「《〝〰々〢〙〈〣〶〟〓〱〬〇〷〦〿』【〕〪〶〺〽〄〡〷〽〲》〟〃」〵〤〞〤〠〜〵〽》〉〡〦〖】〉〓〥〤〞 〺《〖〗 、」〯〳「〾【〩〮。〝〮〙】〦〴『」〘〕〉〚〯〳〇。〾。〇〔』』〚》〃、〠【〝〮」〟《〆〮〇」〥。〟〦〿〠〟〰〺〳々〯】〨〸〼〳〭〶〷〮〨〳〘〤〦。〠『〸〖「〰〝〡〻〻、〇」〇〚』〧」》〮〲〫】〱〼〻〲〷〓〉〵〩〢〣〻〚〞〧〰〽〕〭〧々〠〹〃〟〄〰〚〽〣〚〥〺〛〟〄〮〟〴『〾〒《〺〡 〒〜〈〶〔〫〲〃〟」〿〘〥〥〥〓『〝。〧〾〓〶〺〆〷〩〣〫〜〿〿〰「〕〒〓〯〣〘〗【【〪〾〛〕〽〫〹【〿〧〛〵〲〛〒〇〉〧〺」〺〺〡『〳、〪〾〒〈〮〜〞〙〱【《〣〬〈」〣〵〹〥〵〞〻〆〭〵〟〒〲〧〓〖〣〓々〰〞〹〇〮】〪〫〶「〦〽〓〻〓】〽〭「〣〔〹〯〨〖〩〵〦〳〯〯〧。〗』〾『〩〗〴」〼〗〨〵〥〴、。〒〣〧【《〓〜〓〠〢〓】〷〺〼〕〡〆、〦〿〥〾〚】〕〦〖〙 〭〬〙〇〳〄〃〄〻〧〔〚〰〲〟〷『〫 】〲〲〸〳《〢〵〰〟〪〉〜〨〇〶〻〻〩〄』〒〴〨〈』〗〿〚『〝 〹々〳〼〲〗〙「〵〲〢〔〫〵〜 〘〶【〬『〱〗、〧『〛〇〛〒〈 
、〦】〙〇〖〤〩〜〉」〉〿〬〧【〶〦〃〘〈〖〄〶〦〚〜】〛〽〡〸〰々〈「〾〼〒〥〞〸」〮〸〒〗〙々『〇〄〈〃〜〺〯〉〉〾〹〺〚〞〽〦〄〢〽〄〞〻 〼〄〘〙】〚〼〫〴〚〫〬〖〭〔。〰〹〶〺〕〨〇〛 」。〇〿〲「。〆〗、《〫〬〨〻〝】〓〥〾〴】〹〈〞〺〜〰〜〬〴〱〜〖〾〣〭〥 〯〩〶〈》〸〝〼》〶〆〆〽〼「〗〓『〕〃】〡〠〹〺〈【〸〝〤〮〸〭〩〼〈〃〃〉】〳〿〃〬《 〩〈〒〢〠〆》〇〭〬〓〖〝】〧〶〞〈〶〘】」〽〝《〡 〈〟〶〯〹〦〨〷〩〧〞《〵〬〰々〞〧〓〥》」》〤〥〧〧〓〛。〦〄〫】〪〔〟〟〷〧〷〟〺〪〩〷〡〘〞「〔〽〯〔〬〈、〴〨》〥〒々〼〒"; final Analyzer a = new JapaneseAnalyzer(TEST_VERSION_CURRENT, null, Mode.SEARCH, JapaneseAnalyzer.getDefaultStopSet(), @@ -177,6 +182,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase { // LUCENE-3897: this string (found by // testHugeRandomStrings) tripped assert public void testYetAnotherCuriousString() throws Exception { + Random random = random(); final String s = "〦〧〷《〓〄〽〣》〉々〾〈〢』『〛【〽〕〗〝〓〭〷〷〉〨〸〇〾〨〺〗〇〉〲〪〔〃〫〾〫〻〞〪〵〣【〩〱〭〨〸〃々〹〫〻〥〖〘〲〺〓》〻〷〽〺〯〫』〩〒 〇〔】〳 〵〮〇〡「》〭〆〒〜〱〒〮〺〙〼」〤〤〒〓〶〫〟〳〃〺〫〺〺〤〩〲〬 〱〜〝〤〘〻〚〻〹〒〃」〉〔「〺〆々〗〲〔〞〲〴〡〃〿〫」〪〤」「〿〚』〕〆』〭『〥〕〷〰〝〨〺〧【『〘〧〪』〫〝〧〪〨〺〣〗〺〮〽 〪〢】「〼〮〨〝〹〝〹〩〳〞〮【」〰、〳〤〩〄〶〞〠〗〗〙〽々 〟〴〭、《〃〝〈〒〸〷〓〉〉〳」〘」》〮〠〃〓〻〶〟〛〞〮 〇〨〭〹』〨〵〪〡〔〃〤〔〇〲〨〳〖〧〸 〴】〯〬」〛〨〖〟》〺〨〫〲〄〕」〵〦〢〴〰〨〺〃〓【》、〨〯〥〪〪〭〺〉〟〙〚〰〦〉〥々〇】〼〗〩》。〩〓〤〄〛〇〨〞〣〦〿々》〩『〕〡 〧〕〫〨〹。〺〿《〪〭〫〴〟〥〘〞〜〩。〮〄《〹〧〖〿》〰〵〉〯。〨〢〨〗〪〫〸〦〴〒〧〮」〱〕〞〓〲〭〈〩『〹〣〞〵〳〵》〭〷「〇〓〫〲〪『『》〧〇〚〴〤〗〯〰〜〉〒〚〔〠〽、〾〻〷〶》〆〮〉』〦〈〣〄、〟〇〜〱〮〚〕》〕〟〸〜〃〪〲〵〮〫〿〙〣〈 〳〾〟〠〳〙。〮〰〴〈』「〿《〄〛〩〪》「〓〇〶〩〇、〉〦〥〢》〴〷》〦』〉〟〲〚〹〴〲》〣〵〧〡〾〦〡〣「〆々 〔〄〓〡〬〹〣〰。〵〭〛〲〧〜〽〛〺』〛〵〒〽〻〆〚〚〟〵〲〺〠〼〻〄。〯〉〃』〕〫〥〦〕〔〢々〷々〥〥〖』〶〿〘〗」〖『〢〯〫〇〣〒〖〬〜〝〩〉〾〮〈〩、〘〰〦〧〓〬〸〓〺〼〟〰々〩〩〹〣」〓〸〄『〆〰〹》〵〉】】〼』』〸〣〦〾〰〗〴〥〴〤〃〿〡〳」〢〩〡〮〻〘〤〝〗〃〪〘〈〴〪〯「〭〓々〃〯〄〼〚〧々〢〃〈〔。】〆〣〰〜〪〮〣〿〕〮〾〱〇〈〟〭】〔〥〡〝〙〛〔「 〼〶〸々〹〯『〞〒〇〟〃〳〓〩〝〿《〵】〙〛〪 〭〼〈。〷》〨〰〵」〤〄〾〄「〈『〥〽〕〙【〤》〳〝〔〠〤〲〘〱〈『〴〫〚「」〛〸〹】〱〒〆」。〯〃】〼〮〒〄》〾〷〥〟〞〲〜〲〟〫〕〆〇〸〸〹〾〰【》〨〤〭「〇】〳〯〤、〙〳〺『〲〽〬〥〠。〹〃」〹〪〭〒 〇〶〧〟〻【」】〙〤〡〱〖》〇々〽〬〥〨〠〘〺〳【〫〄〜〹〄〚〯〈〸〻〓〥〤〻〮〃〗々〪〺〿〬〙〈『〭〩〟〽〬〝〄〦〇〥【〨〫〦〗〯〞〜〈〒〽〖〧〼〈〭〓〶〃〰〙「〧〉〹〢〕〼〒〸〼〣〡〔〩〯〼〚〲〖〪〯〒〮】〥〙〯〆〡〲〾〭〫〕〘、〖〮】〟〺〝〨〤〯〓〛》〳〢「〒〥『〿〔〸。〫〬〡〓〝「々」。〘〣〲〴〆〲】〽〮〮〲〓〞。〲〘〉【〲〭〰〨〩〱「〆〩。〦〉〇〄〺〱」〮〄〯。《〭〹〳〸〜〮〧〷〜〹〥〾〨〬〦〮』〖】〖〥〞〕〧〹〽、〺〜〯〒《々〠〠〴〝〤〇〷『〳〞〠〤〣。】〝。〛〉《〩、〦〻々〄〙〞〽〒〧】〉〺〦〔〄〯〙 〫〴〈〽〴《〰〱〗〢〓〔〗〖〖〪〷〠。〨〠〙〴〷〿〻〴〪〠 》〉「〛」〟〗「「〚〤「〫〨〣〉〶〥〢〈〯〄〈】〃〵〪〼〸「〾〥〒〲〮】〙》〡〯〓〵〡《〬〾〛】〄〡〦〪、〆、〵〒〹〰〴〜〬〶〭〕〟〠〰〜〶〵〨〾《 〻〵〔〘〟〾「〡〃〼。〤〺〭〨。〪》〄〇〄〔〖〺〪』〆〸『〰〭〆〗〪〪 〇〜〡〨〞〧〇〛〥」〼〇〼『〸『〵〼〇 〽〹〨〪〗〳〽』〵〽〸〷〄〿 〩〢〺〳〗〞〹〒〼〕。〇〷〔〯〜〘〾」」。〥〯〤〖〛〙〹〘〯〡〱〮》〰〾〚〚〣〆〰〹〾〝〉〲〠〗】〤〿〶〱〾〇〽〤〰〆〭〝】〤〰〼〪〬〰〸〓、〃〵〄〉〤〲〱〨〵〴〮〹〬〧〜〭〶〒〯〺〬〒〭〲〡〔〚〹〇〫【〯〥〪〻々々〨〧〳〛〯〿 〈〽〥〘〖〣〿〫〲〶〚 〓〙〫〴〆〙〶〽〉、〔〪〫】〤〟〓〃〝、〧〡〸〸。〸【〹 〧〡】〡「〗〴〴〳〶『〱〖「〺〠〼〾〱〃〖〤「〧〭〟〇〧〙〕〩〭〻〤〩〪〳〪〟々。〷〥〗。〳〸〆〢「〆〿〻〚〳〚〸〟〘〡〘〇〶〖〡〇〾〥〖〝〝〹』〦。〖「》〥〞〳〛〕〖〥〻〙〾〔〬〈〇〓」〭〹〷〪〖《〫〾〒〙〺〻〨〼〇〝〾〣〴〚〩〴〕〢〦〩』〭〧〵〾〟〣〬〥〟〣〜」、《〲〧〪〸〸【〙〹、〤〽〰。〦〩〮〹】〸〆〹〗〓〶〇〤〳】〾〨〞〩〱〡〇〱〮。〶》〝〱〗〃〘〣〬〲〽〈〒〻〃〥〪〭〤〗〰『〵〹〙〇〵【〕『〤〄〕〥〵〸〮。〳〮、〤〣〱〧』〯〜』〉】『〷 〰〵〓〙〃〟〆〼〞「〫〄」』 〨〹〸《〷〔〫《〝〞〆〬〩〟」】〾〷〄》 〵 〫〵》〻〨〰 〟〈〰〽「」〸〣〪〮〛〞〜〦〱〚』〕〱〪〲〩〥「〚〓〺〣〶〨』〕〇〮〹〟〞〕〶〡〭〠〕〦〦〢〽〤〈〈〻〣〧〱〿〵】〖〞〖【〢〩〼【 〻〘〃〤〫。〠、〗〢〷」】〼〘〖。〤〘〄〢〴、〘〆〯〱〜〃「〦『〯〰〘〫〹〶〷〿、】々〙〛〜「〹々〮〿「〸〉』〯〱〄〓〥〣〩〥』〖〤〛【〭〿〺「「〳〛〧〉『〈〆〒〠》〳〈〳〩〃〮〚〼」〲〮〩〮〮〢〸〿》〈〉〗〾〇〕〩〸〖〾〠》〃〞〄〣〭〡〕〣〚〆〤〄。〸〞。《〼〄〤〸」〿》〤「〵〥【〔〕々〙〸〛〛『〶〾。〷〫〼〽〤〨〓〭〻〈〶〿〾〨』〤【〾」〇〤〒〠〺〜〸〼〪〢〷〔》〣〤〬〣〱〝〇〺〢〠〤〹〡「〪〲〿〬〘〡〯、】〖、〈〶〛〢〕々〽〼〼〚〿〘】〢〰〡〿〗《〉〙《《『〶【、、】〡〓〦〞〵〤〧』〝〕 〄〃〸〈〤〪〻〭〉〘〷〉〕〨〻〢〢〡〸〔〮〧〹「〦〘〉〾〉 〺〽〷「〺〖〺〝》〃 〇〪〜〶〺〣〇〭 〾」〣〼〞〷々〽〤〶々》〻〈〽〒〕『〬」〈〟〕〷〼〲〄〚〜〴。〮》々〧〻〔〕〈「〾『。〴〷〯〢〿〦〈〸〩〻〃〻〚〞〤〈。〧〇〾〺〢〓〵〸〛〔〡〷【〜〺 〕〶〦〣〻〟」》】〺〚〷〺〹〙〳〺〬〓〢』〘〕「〸、〙〾》〖》々〬〄〇『。〵【〩『〺〆〮〮〙〵〫《〃〽、〓〠〨〚〕〈『〦【〗 〄〴〫〡〮〱〔〆〗〟〵】〻々》〲【〬〢〚〛』〱〰〫 〇〤〴〮〾。。〮】〇〲〻〙〰〥〚」〟〜〄〟。〤々〞》〧〉〳【〿〺〆〈〖》『〤〄』〾〵〲〸。〈『〕〺〘〣〶〬『〪〆〳〽《。〒「〽〨〸〜〚〘〪〤々〦〆〺『〣〆〽〇〿「〥〵〒〲〟〜〳〭〼〆〡〮〆「〆〥〺》〱『〺〔〃〙〻〥々《々〙〼〪〼〵〙』〥「 〵〯〓〩。〰〕《〟〦〝、〦〦〤〗〴〩〹〶〠〰〡〇〤〹〓」〣〆〜〴〘〔〃「〤〈〩 〠【〃〙〢々〉〝〬〙〭【〮〗〙〤〿〖〓〫〻』〞〤〼〳〹〄〵〾〔〛〮。〒〉〤〣〭〰〨》〭〲〗〃〇〆〡〜〱〲〮〫〄〬〄〉〯〈〮〩【 〮〦《〪〲〣〡〶〬『〲〵〇〶〰〒〭〽 〰〄〻〄『〬〩〠』〕〫〤 〼〶〳〮、〓〸〲〓〜〳〺〈〫〺〒 〨〡〡【〷〆〇』〝〩〨〗〕〪】〪 〛〛〺〙〷〦〠」〱〞〼〸」、〢 〺」』〲〆〃〟〱〟〝「《〸〳〒〖〨】〥〖〈〧〼。〫『〙〧〡『】〔々、〼〝 〕〙〇〘〲〔〝〺〘〄〓〒〼〈〛『〺々〩〱。』〬【〱「〳〜〼〬〴』《〗〔〡〰〪〤〥〲《』〥〉〪【〶〤【〻〡〒〯〜【〽〪〉〠〾〙〰〚〵〦〦〴々》〙〠」》〠〱〓【〶〦々〻【〽〶〼〺〷。〶【〘》〻〗〳〣、」】〳〓〞〆〆〾』「〈〙〕〱〢〳〨〰〡〸。〣〪〤「〱「『〙〽〇々【〜〖〮〚〟」。〜〰〉〔。〣〽〇〖〬〆〥〖〧〨〱〡〸〪〣々』〄「『〞〶 〴〰】〃〱〱「〶〝】〞〭〚〴〶〻〟〧〡〳〬〧、〣】〕〼「〠〃〷〣〩 
〭〄〩〝〦』〟〇〦〟〕。〩』「〵〩》〿〻『〙〼〲〰》〨〉〆〓、〺〹〸。〞〧〗〘〳〓〞〹〕〡〼〔〖〴〄】〚〻〯〴〣〮〦〧〣〵〼〚〾〫〼〣〔〚〽『〵〒【【〝〹〮》『〨〜〠〸〠〵〨〙【〧〸〈》〱〗【〓〤】〰】】、〩〽〈〸〔「〵〻〙〓〰〇〚〞〗〙〢々〭〜〈 。〧〿〧〨〵〾〝〬【』〫〦〸〬〈、〒〢〉〞〵〒〼〝》〻〫〧〤〶〹〼〩〛〫〣】〿」〴》〺〬〤〕〲〕〙〔〪〰〿〬〒〔〞〆〻〴〘〩〨〤》〩〪〭〳〇〣〚〟〚〕〓〴〱 〵〃〠〭〠〚〗〃〃〸〰〢〡〿〭『〗〉、〲〕〧「〛〛〓〜〰〮』〱〨〬〨〽〸〽〶〣〯〫、〯々、〴 」〕〥』〻】〖〴」〨。〖〤『〜〰〩〣〣〸、〫〝、〯〹〷〳〚〄〷【〃」〼「〤】 〢〖 〣〙〺〽〽〱〤〔〓々〣〭〽〘〦〻〪〿〞〝〱、《〆」〸〷〛〓〕〹〜〪〹〶、〵〦〛〲〒〹〪〦〃〥「〸〪〙〧〱〠〰〝〆〠〯《〼 〛〚〔〟〽〗」、〲〥〞〴〃、「『〖〼〞〪〼〇』〿〶々〙〻》〥、〵〛〞〠〫〟  〹〾〵』〤〿〣〪〗〃〖〬〩〴〗々〓〝〥〥〜〲〯〗〤》〛〮》々〚〘〫「〙〉【〆〽〨〹〮〧〷「〴〝〬〷〗『〔〷〮〟〲〬〸〸〟〹〆〖〨。〣〄』〴〚】〘〲〚〚〦〈〛〗〞〉〞〯〆〵〸 〗〕、》【〸〮〵〉〥〨〕〟〭【〾〇〵〬〾」〱〹〚〟〛〡」〩〃〄〬〱〭〚〱〆〛》〣』〝〡〦〣〫〒〗〛〿〤〇〼〠〲〢〬〿〓〠」〚〇〛〈〴《〦〱〤〹〝〱〶〟〙〴〶〣〝〮 〜〲〱〿〳〪〄〝〃〰〙〖〼〰、〬〰』「〭〻〮 〩』〱【〆〻〺〸〾〤〗〸〥〽〼】〤〣〖「《〡〙。〸、」』〠》〴〈、〴〢〣〲〟〳〸〒〠〣〵〢〿》』〿『〾〔〢〶〦〟〠《〹「〷〽〷〆〇〉〲〿〵〙』〫〠々々〘』《〽〒〦〽〓〳、〮〻〫〞〲〰】【〗」々〥』【〫〆〫〳〾〣〖〺〷〙〘〄〈〼、〧〻〭〮〳』〘〾〇〸〉〽〗『〙〽〻〟〇〘〽〖〴〄〓〞「〦〪〚〾〨。〕〻〰〟〉〢「〉〿〯〔〹〃〛〛〝〔《〵「「〴「〗〸〖〞〦【々〣〲〤〾〿〽〲〥〢〥』〳〳〼『】〆〼》〩》」。〛〲〡〳「〢〥》〘〠〃〳〃〒 〧〓〡〤〄〲」〦〶〷〟〛〠〱〽〫〫〸〇〔、〪〛〠 」〢〳〸『〸〚〹〈〘〉〫〇〲〲〈〕〙〱】〯々【〬〖〿〒】〔〭〣〚〄〈』〧〗〹】〇〬〸〾 〭〺』〯〫〻『〘〻〱 〴〆〘「〠〈〫〡》〤〕】〜〙〵〒〙。〦〮〞〪』〴〓〪〾〝〹〴〼《〦〞〖〆《〥〸〻〈〽〪〤【〖〶〞〤〃〰〨〱』〨〼〱〠〣」〝〹〝〕〼〔〃、〮】〤【〼〤〼〥〪〲〓〦〘〟〞〭〜〸】〚〸〵〞〙〧〈〽〹〄『『〙〓〸〯。〜〺。「〖 〶々〉〈〮《〢〭〶】〘〜〺〸〒〥〢〾〈』〱〃〤〳〖〉〼〫〛〚〽〫〳〰〫〥〜〜〺〷〲《〢『〛〭〈〧〳〣〜〝〧、〥〾〻〳〺〕〥〥〼》」〺〮〒〣〥〲〟〠〫」〾〱〼「〄〆「〓〽〹〵〈〙〛〵〰〩〟〫〈〔々〒〟々〉、〷〚〶〆〘〛。 【「〸〸〖〫〕〰〱〺〟〫〿〹〩〇。〾〒〚〲〾〛〳〨〦〙〒》。〺〧〡〞〒〚〩〪〶〘〣〨〶〩〛〺〙〪〄〼〮〰〒〡〼〓〙〒〇〽『〃 』〇》〽〃《〒〠「〚〨〗〶〴〪〮〵〘〨々〓〗〚〠』〗〮〳〺〲〙〒「〴〼〻〤〉〯〨〧〈】〾〟〝〒〃〘〧『〶〿、〤〝】〜〴〰〷〽〮〱、〩〽〺〯〫〜【〴〈〳〖〬】〦〘〗〜〝〄〚〚〤〨〲』【〞「〰〔》〷〥〈〡〳〢〾〮《〭〫〡〴〹〻〚「〰〻〉〣〢〤〤〝〩〧〙《〓】〺〺〓〿〹〈〚〱〬〘《〽〈〕》〣〓〒〴〆〜〭〖〛〝〷〧〴〮》〳〘〸〴〿〥〙〒〔「》〓〕〦〯〾〯〝、〮、〯〆〛』〞〝〵〥〬〚〡〰〔〵】。〽〥〿』〩〇〝〄〴〪〭〸〫〡〣〧〆〚〫〴〙〦〽〉〸〼。〱〨〛〠。〮』〝》〻〹〈〄《〻〱〥〞〽〾〄〝〢〿。〴〆〲『〰〢〖〲〼〯〃〠【〲〵〛〣〝〕〬〺〰〪〻『〨】〖〥〵〹〯 〒「〠〮〈〃〹〽〬』〹〷〫〕〧〟〒〉〉、〈「〟》〼〪〰〗〘『〞〉〹〚〤〩〦〗〖〮〰〇〠〫」〔》〮、〆〡〛〻〙「〵。〯〹〘「〵〫〼。】〃〢〺〴〛〪〬 〞〟〓」〭】、〸〘〻〈〤》〓〩〽〆〵〨〈「〦〠々〨〒〢〛〝〿〗〥〱〕〩〖〣〄〚〿〆〗〢〉々《〚〩〶》〥 【『〪〯〾〸〪〲〞〠 〡〓〻〷〢〕』「〹〯〛〫〲〗〗〚」〵【〪〢〥〫〆》〦〥〱〯【【〉〧〺〻〉〬〳〒〳〾〲〲〇〇。〪〙〧〿〆【」〇〪〸〽〦〚〽〿 〠〺〥〦々〬〄〟〪〭、】〴〾〸〛。 『、《〫〺〯〛〩》〓〴〪》」々〧【〦〇〮〬〲〗〔〦〴〣〼〨〖〩〬〼々〛〇」〴〦〉〤〺〪《 〒〧々〤〧〣〘【〵〛〢〵《〛〘〵〓〶〳〤〺〨〣〭〤〪〮〺〷《〗〵〞〻〠〭〃】〄〒〯々〶〉〞々〽〤〇〦『〦〽〩〬〠』〷〄〩〙〖〝『〘『々〔【〿〰〶〪〱〉〘》〃〙〧〦〇「》《〹〰〯】〹〄〈〪〜〵、〮〣〇〯〲〛〬〕】々〸〹〩〟〳〆〥〯〬〠〭〯『〙〆〾『「〈〬〹〕〾、〸」〷〥〆〺〾〖。〆〒〮〻〡。〉々〕』『〨〼 〢〓『〢」々『 〘、〖〤〜【々〤〷〵〳〤〽」〟〥〴』、〒〥〆〙〬〧〔〡〄》〷。〣〉〪〙〚〾〣〵〰〮〔〇〝〫〫〩。〪〷〩。〇〿】〲〦〳〕《〄〴〦〽〔、〱〧〟。〻〺〔〝【〲〔〦〙〖》〠〫】〵〙〰。〖〸〼〣〗〲々〤〢〷〝〰】〳〳〯〟〓〬〺〤〿〲〩〞〡〧〲〧〭〽〪〰〥〧〴〈〈〢〕〯〔〨々〭〸〡〖〓〤〒〝〻〻』〣々〸【〸〸〷〓〇〦〻〤』〉〾〛「〢〢《】〜々〛〇〠〒〹〖〽〮〚〫〜〼〄〓 〹〽《〽》〮【〺〦〠〨〰〸〘〲』」〹〳〤〽〴〴〰〳〷〟】〼〽〓〇、〡〚〶 〥〄〉〴〵、〷〳〥〬〳〓〩〯〜〪〯〬々〢〾〆〨〥」』〪〄〨〽〗〭〯〼〒〡「々〩』 〉〔〓《〉〺〫〖〽〱〳〡〪〯』〼〉〝〟〹〯〇〠〥〨〖「〢「〥〲〘『〹〥〶〜〥『〃。〲〗〢〩〮〕〨〸』〪〯〲】〠〻〟〶〣〸〵〩〔〾〞〳〾〇〵〥〟〭〳〡〆〾〤〶】〈〓〄〮〢〒〩《〔〭〄》『〰〧〡〖〵〥〵〒〭〳〵〝〜〱々〞〰〴〦〱〿〾〴〪〥〧〚〚〒〚〘〿〛〾〫〚〕〷〔〗〢〻〠』〘〾〖〿〦〥〮〆〼〞〴〹〸〻〵〞〄々〷〔《】〛〒〻〓〴〮〛〺》〫〬々〦〦〬」〯〞〼〚〘〰〿〝〾〘〠〵〴〃〞、〹〢〗〹〰〤「〔』〇〒〭〫 〞〉〿〜〳〫〩〿〧〵〟〾〤々〩〝「《〬〃〇〬】〔〇〆〷〭〬〵〾〚〺〬〧〻『」〈」〻〹〞、】。〉〯〫〺〒〙」〱〛〻「』〱〺〠〄【〿〦〰〸『〬〴〓〨〢《〣〓〜〒〡『〼〔『〵〕〝〗〳《〲〳〼〝「〽〬〱〺〠〱〽〘〗〹〨〆〕〠々〓〤】〺〉〴〰〮」〰〿〹〳『〠〔〇〧〭〼〪〭〯〖〶〬〃〱〔〙》〺〜〵々】〡〧〲』〕〛〳〥〩〱〮《〦〫】〖〈》〞〻〤〢〦〪〬〲〗〢〷  〳〰〓〕〜〥」〬〗〒〜〉〩〆〬々〿〪『〣〘〡〘〯〳【〄〠〸〼〈〰『。〟〲〭〡〷〥〯〴「。〤〓〪〆〦〆〒〽〫〰〚〡〨【〯〹「〧」〓〖〘〳」〕〲〚〣〕〆〃〱〞〷〺〻〃』〩〫〦〱〴〟、〰〘〞《」〛〤〿〔 〤〱》〗〷〡〡〗〞〦〿。〤〳】。〟〻〉「〻〙〖〿〄〶」〾〫〽〸〕〢〰〞〞〒〜〻〠〭〫 〞〴〰〶〺《〣々〩〲〡〴》、〩〝〞【〼〓〱〻〩〒〖〿〮〱〧〟〒〶、〿〈「〻〴』』〇〉〝〛〢〜〼〘〰〇〢〃〲〟〨〟〣〟〰〉〮〘〽〧。〓〳〩〺〳〓〘〗〖〈〜〴〟〽〣〣〾〽〩〲〜〇〰〩〕〧〚〄〴〴〴〨〠〦】、〣〺〖》〯〷』〒〤』〙〗〬。〧〆〜 〧〩〯〞〜〬〡〆、〞〔。〾〩〈〛〼」〾〮〤〾〟』〉〔〞〾〛〲《〈〫〝〽〳〞〔【〿〽〩。〈〨「〯《々〇、〯〜〾〝〯〼〆〟〉〝〮〙〪〚〮〱〹〯〜〟〠、〄〹〧〳〱〯〖〯】〩〴【〫〇「「〿〩〷〾〴〯〦〼〦〟〖〤〪〥〰〔〻〪〄〖〳〵〟〕〰〬〶〚『〘〻〇〽〪「〉】〮「〣〿〇〭〕〓〵〽〆〳 〨〩〕〬〵〸〻〲【『〥〖〚〢〰』〠。、〮〣〆〴『《〲〓〷《〱〰々〫〶〢〯〗〚〙〶〫〖〃〻》〰『〱〘〫〛〄〉「〠〱〚〖〕》〤《 〵〶〢〯〗〳〛〚〽〗〟〛〪〾〶〞〶々〆〯〇〝〕〨〨〣〫〄〵〞〛〬〣《〦〦〒〉〙〫》〞〨〜『〝〻〒〟〓〜》〡〡〫〻』〆〒 〔。〓》《〨〙〿〙〔〘〮〦「〚〻、「〵〠〉〬。〭】〱〸「〶〈〞〈〪〟〻〝〲〮〆〼〯『〱〡〙〮〕〒〣」〳〥〙〡〡『〇〠〡〭〷〜々』〣」〼々、〗〡『〽〻〽〳〉〄〵〬〽〯〥〾〙〉〿〮〴〷〥〡〰〹〰《〺【〒」〙〾〽。〴〘〕〝 〳」〡〇〩〥〾〆〨〉〫〠〙〤〒【〸々〣〓〰」〈〪〵〠〚】〈〆〵〗〜〦〣〃〼〔〉》〆〞〚〆〄〫〺〽〪々〩〴〵〹〿〔〥〜〩〪〤〗。「〽〨〟、〄〽】〩〙〝〺〶〸〟〯《《〥〣〻『〟〽〮〄》〙〕『」〾〼〷』々〥〒【、〗〔〯】〮 〹〩】〡〇〟〫〢〨〡〭」〄〼〙〪〻〪々〙。〫〧〪〞〾〄』〟〶〇〞〜〥〘。〝〨〸】〕〔〨〕〾〃〾〒」〈〒〓〼〗〖〕〱〙〘〓〝〾〔【〵〿〖〸〷〵〩【〞々〼〢〧〻〥〰〦〤》〰〛〡。〖〝〙〒〽〜〕〘「。〵〇〒〾〼〽〈〣〇〒〙〢〸、〞〲》〪〰〴 〽〭〷〸〫〆〞〾〨〆〛〔〤〜」〈〨〃〈〴〽〲」。【〞〒〉。〱〕〨〽。 
〷』〦》〵〩〪〡〕〞〹〃〧〃〝〢〴。〃〛〭〻〣〸〖〞〻【〛》〜〳〜〟〘〄」〸〬〶〥》〨〭〡〦〇〇《〱】〸〼〺〬〛〓〔」〰〈〧、【〕」〳〼〗〯〉〒〖」〧〩》〴」〺。〰〷』〩〚〭〞〰〶〚〲〙〥〢。〽〵〱」】〓〘〦。〭《〥〙、〱〹〦】〕》〲、〘〓〙〷、〪〕〉〭、〇〜々〖〨〞」〠〕〲〨〕〔〻〿〙〘〙』〼〘〡〢〧〚〢〷〸〰〟〰〗」〪〛【〪〺〒〱〈〦〽、『〥 〙〪〕〝〄〛〣〴〯〆〒〰〜〪〆〠〞〾〃〭〬〡〉】〄〃〥〥〒〶〕〢〵〣〢〨〘〩〹〖〧〒〺〫〕〡〆〭〘〿〠〹〲〔〫》〪〰〇「〯〫〈〾〱〄、〮『》〹〿〿〱〦】〳〰」。【〘〆〞〚〱》〫〷〸〠〲〚〶〷〘〩〯〛〄々 』〪〭〬〖〪〦々〼》〇〤。〉〯〟〮〢〤〬〜〪〬〺〿〹〖〔】〕〖〣 『〵〸》〧〻〺〜〧〯〄"; final Analyzer a = new JapaneseAnalyzer(TEST_VERSION_CURRENT, null, Mode.SEARCH, JapaneseAnalyzer.getDefaultStopSet(), @@ -189,6 +195,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase { final Analyzer a = new JapaneseAnalyzer(TEST_VERSION_CURRENT, null, Mode.SEARCH, JapaneseAnalyzer.getDefaultStopSet(), JapaneseAnalyzer.getDefaultStopTags()); + Random random = random(); checkAnalysisConsistency(random, a, true, s); } @@ -197,6 +204,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase { final Analyzer a = new JapaneseAnalyzer(TEST_VERSION_CURRENT, null, Mode.SEARCH, JapaneseAnalyzer.getDefaultStopSet(), JapaneseAnalyzer.getDefaultStopTags()); + Random random = random(); checkAnalysisConsistency(random, a, false, s); } } diff --git a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java index 2672cc94102..2ad4464a9ae 100644 --- a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java +++ b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilter.java @@ -46,7 +46,7 @@ public class TestJapaneseBaseFormFilter extends BaseTokenStreamTestCase { } public void testRandomStrings() throws IOException { - checkRandomData(random, analyzer, atLeast(10000)); + checkRandomData(random(), analyzer, atLeast(10000)); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java index 34d23b8e92c..daa0e7c0fd6 100644 --- a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java +++ b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java @@ -67,7 +67,7 @@ public class TestJapaneseKatakanaStemFilter extends BaseTokenStreamTestCase { } public void testRandomData() throws IOException { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testEmptyTerm() throws IOException { diff --git a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java index de8bbe3b875..658409ddab2 100644 --- a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java +++ b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilter.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.core.KeywordTokenizer; import java.io.IOException; import java.io.Reader; +import java.util.Random; /** * Tests for {@link TestJapaneseReadingFormFilter} @@ -59,6 +60,7 @@ public class TestJapaneseReadingFormFilter extends BaseTokenStreamTestCase { } public void testRandomData() throws IOException { + Random random = random(); checkRandomData(random, katakanaAnalyzer, 
1000*RANDOM_MULTIPLIER);
     checkRandomData(random, romajiAnalyzer, 1000*RANDOM_MULTIPLIER);
   }
diff --git a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
index 574438cb705..57e2261ad3b 100644
--- a/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
+++ b/modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
@@ -23,6 +23,7 @@ import java.io.InputStreamReader;
 import java.io.LineNumberReader;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -182,24 +183,26 @@ public class TestJapaneseTokenizer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
-    checkRandomData(random, analyzerNoPunct, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), analyzerNoPunct, 10000*RANDOM_MULTIPLIER);
   }
 
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
+    Random random = random();
     checkRandomData(random, analyzer, 200*RANDOM_MULTIPLIER, 8192);
     checkRandomData(random, analyzerNoPunct, 200*RANDOM_MULTIPLIER, 8192);
   }
 
   public void testRandomHugeStringsMockGraphAfter() throws Exception {
     // Randomly inject graph tokens after JapaneseTokenizer:
+    Random random = random();
     checkRandomData(random,
                     new Analyzer() {
                       @Override
                       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
                         Tokenizer tokenizer = new JapaneseTokenizer(reader, readDict(), false, Mode.SEARCH);
-                        TokenStream graph = new MockGraphTokenFilter(random, tokenizer);
+                        TokenStream graph = new MockGraphTokenFilter(random(), tokenizer);
                         return new TokenStreamComponents(tokenizer, graph);
                       }
                     },
@@ -208,7 +211,7 @@ public class TestJapaneseTokenizer extends BaseTokenStreamTestCase {
 
   public void testLargeDocReliability() throws Exception {
     for (int i = 0; i < 100; i++) {
-      String s = _TestUtil.randomUnicodeString(random, 10000);
+      String s = _TestUtil.randomUnicodeString(random(), 10000);
       TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
       ts.reset();
       while (ts.incrementToken()) {
@@ -229,7 +232,7 @@ public class TestJapaneseTokenizer extends BaseTokenStreamTestCase {
       if (VERBOSE) {
         System.out.println("\nTEST: iter=" + i);
       }
-      String s = _TestUtil.randomUnicodeString(random, 100);
+      String s = _TestUtil.randomUnicodeString(random(), 100);
       TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
       CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
       ts.reset();
diff --git a/modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java b/modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
index c272b7dc1d6..be4f7d13a88 100644
--- a/modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
+++ b/modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
@@ -105,6 +105,6 @@ public class TestMorfologikAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandom() throws Exception {
-    checkRandomData(random, getTestAnalyzer(), 10000 * RANDOM_MULTIPLIER);
+    checkRandomData(random(), getTestAnalyzer(), 10000 * RANDOM_MULTIPLIER);
   }
 }
diff --git a/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java b/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java
index 79aea27abb8..f49f3f42a72 100644
--- a/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java
+++ b/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java
@@ -73,7 +73,7 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
   }
 
   public void testRandom() throws Exception {
-    final int codeLen = _TestUtil.nextInt(random, 1, 8);
+    final int codeLen = _TestUtil.nextInt(random(), 1, 8);
     Analyzer a = new Analyzer() {
       @Override
@@ -83,7 +83,7 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
       }
     };
-    checkRandomData(random, a, 1000 * RANDOM_MULTIPLIER);
+    checkRandomData(random(), a, 1000 * RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
@@ -94,7 +94,7 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
       }
     };
-    checkRandomData(random, b, 1000 * RANDOM_MULTIPLIER);
+    checkRandomData(random(), b, 1000 * RANDOM_MULTIPLIER);
   }
 
   public void testEmptyTerm() throws IOException {
@@ -102,7 +102,7 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
-        return new TokenStreamComponents(tokenizer, new DoubleMetaphoneFilter(tokenizer, 8, random.nextBoolean()));
+        return new TokenStreamComponents(tokenizer, new DoubleMetaphoneFilter(tokenizer, 8, random().nextBoolean()));
       }
     };
     checkOneTermReuse(a, "", "");
diff --git a/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java b/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java
index 60a3b1a7cad..aa36271f733 100644
--- a/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java
+++ b/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java
@@ -91,7 +91,7 @@ public class TestBeiderMorseFilter extends BaseTokenStreamTestCase {
 
   @Ignore("broken: causes OOM on some strings (https://issues.apache.org/jira/browse/CODEC-132)")
   public void testRandom() throws Exception {
-    checkRandomData(random, analyzer, 1000 * RANDOM_MULTIPLIER);
+    checkRandomData(random(), analyzer, 1000 * RANDOM_MULTIPLIER);
   }
 
   public void testEmptyTerm() throws IOException {
diff --git a/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java b/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java
index fd56adab6d2..5cfa5e71b20 100644
--- a/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java
+++ b/modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java
@@ -91,7 +91,7 @@ public class TestPhoneticFilter extends BaseTokenStreamTestCase {
       }
     };
-    checkRandomData(random, a, 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
       @Override
@@ -101,7 +101,7 @@ public class TestPhoneticFilter extends BaseTokenStreamTestCase {
       }
     };
-    checkRandomData(random, b, 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER);
   }
 }
@@ -114,7 +114,7 @@ public class TestPhoneticFilter extends BaseTokenStreamTestCase {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
-        return new TokenStreamComponents(tokenizer, new PhoneticFilter(tokenizer, e, random.nextBoolean()));
+        return new TokenStreamComponents(tokenizer, new PhoneticFilter(tokenizer, e, random().nextBoolean()));
       }
     };
     checkOneTermReuse(a, "", "");
diff --git a/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java b/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
index e232a7bdf48..4a8c8b11b75 100644
--- a/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
+++ b/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
@@ -20,6 +20,7 @@ package org.apache.lucene.analysis.cn.smart;
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Random;
 
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
@@ -223,15 +224,17 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new SmartChineseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new SmartChineseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
+    Random random = random();
     checkRandomData(random, new SmartChineseAnalyzer(TEST_VERSION_CURRENT), 200*RANDOM_MULTIPLIER, 8192);
   }
 
   public void testEmptyTerm() throws IOException {
+    Random random = random();
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
diff --git a/modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java b/modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java
index 21c55238d93..43561f72cdf 100644
--- a/modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java
+++ b/modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java
@@ -51,6 +51,6 @@ public class TestPolishAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new PolishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new PolishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git a/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java b/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
index 0fee64a0823..cdcba404d8b 100644
--- a/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
+++ b/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
@@ -118,7 +118,7 @@ public class UIMABaseAnalyzerTest extends BaseTokenStreamTestCase {
 
   @Test
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new UIMABaseAnalyzer("/uima/TestAggregateSentenceAE.xml", "org.apache.lucene.uima.ts.TokenAnnotation"),
+    checkRandomData(random(), new UIMABaseAnalyzer("/uima/TestAggregateSentenceAE.xml", "org.apache.lucene.uima.ts.TokenAnnotation"),
         1000 * RANDOM_MULTIPLIER);
   }
 
diff --git a/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java b/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java
index 85035cc5d77..61a012850f8 100644
--- a/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java
+++ b/modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java
@@ -60,7 +60,7 @@ public class UIMATypeAwareAnalyzerTest extends BaseTokenStreamTestCase {
 
   @Test
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new UIMATypeAwareAnalyzer("/uima/TestAggregateSentenceAE.xml",
+    checkRandomData(random(), new UIMATypeAwareAnalyzer("/uima/TestAggregateSentenceAE.xml",
         "org.apache.lucene.uima.ts.TokenAnnotation", "pos"), 1000 * RANDOM_MULTIPLIER);
   }
 
diff --git a/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index 9e0cb607365..476f7e6edbf 100755
--- a/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -102,7 +102,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory()));
     // now we should be able to open the index for write. 
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setOpenMode(OpenMode.APPEND));
     iw.close();
     IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
@@ -189,7 +189,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory()));
     // now we should be able to open the index for write.
-    IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
     iw.close();
     IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
     assertEquals("100 docs were added to the index, this is what we expect to find!",100,ir.numDocs());
@@ -228,7 +228,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory()));
     // now we should be able to open the index for write.
-    IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
     iw.close();
     IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
     assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
@@ -301,7 +301,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     assertEquals("TestSearchTask was supposed to be called!",139,CountingSearchTestTask.numSearches);
     assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory()));
     // now we should be able to open the index for write.
-    IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
     iw.close();
     IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
     assertEquals("1 docs were added to the index, this is what we expect to find!",1,ir.numDocs());
@@ -431,7 +431,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     // now we should be able to open the index for write. 
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setOpenMode(OpenMode.APPEND));
     iw.close();
@@ -497,7 +497,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
       TermsEnum termsEnum = terms.iterator(null);
       DocsEnum docs = null;
       while(termsEnum.next() != null) {
-        docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(reader), docs, true);
+        docs = _TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, true);
         while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
           totalTokenCount2 += docs.freq();
         }
diff --git a/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java b/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java
index b5d5b545116..3350ac5254b 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java
@@ -157,7 +157,7 @@ public abstract class FacetTestBase extends LuceneTestCase {
       pair.taxoDir = newDirectory();
     }
-    RandomIndexWriter iw = new RandomIndexWriter(random, pair.searchDir, getIndexWriterConfig(getAnalyzer()));
+    RandomIndexWriter iw = new RandomIndexWriter(random(), pair.searchDir, getIndexWriterConfig(getAnalyzer()));
     TaxonomyWriter taxo = new DirectoryTaxonomyWriter(pair.taxoDir, OpenMode.CREATE);
     populateIndex(iw, taxo, getFacetIndexingParams(partitionSize));
@@ -242,7 +242,7 @@ public abstract class FacetTestBase extends LuceneTestCase {
    * Sub classes should override in order to test with different analyzer.
    */
   protected Analyzer getAnalyzer() {
-    return new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    return new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
   }
 
   /** convenience method: convert sub results to an array */
@@ -285,7 +285,7 @@ public abstract class FacetTestBase extends LuceneTestCase {
     TermsEnum te = terms.iterator(null);
     DocsEnum de = null;
     while (te.next() != null) {
-      de = _TestUtil.docs(random, te, liveDocs, de, false);
+      de = _TestUtil.docs(random(), te, liveDocs, de, false);
       int cnt = 0;
       while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
         cnt++;
diff --git a/modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java b/modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java
index 866bb11f511..9a126bdf15e 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java
@@ -56,8 +56,8 @@ public class TwoEnhancementsTest extends LuceneTestCase {
     List<CategoryPath> categoryPaths = new ArrayList<CategoryPath>();
     categoryPaths.add(new CategoryPath("a", "b"));
-    RandomIndexWriter indexWriter = new RandomIndexWriter(random, indexDir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), indexDir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     TaxonomyWriter taxo = new DirectoryTaxonomyWriter(taxoDir);
 
     // a category document builder will add the categories to a document
@@ -101,8 +101,8 @@ public class TwoEnhancementsTest extends LuceneTestCase {
     List<CategoryPath> categoryPaths = new ArrayList<CategoryPath>();
     categoryPaths.add(new CategoryPath("a", "b"));
-    RandomIndexWriter indexWriter = new RandomIndexWriter(random, indexDir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), indexDir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     TaxonomyWriter taxo = new DirectoryTaxonomyWriter(taxoDir);
 
     // a category document builder will add the categories to a document
diff --git a/modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java b/modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java
index 09bcab41a2b..7a3d5abda93 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java
@@ -57,8 +57,8 @@ public class CustomAssociationPropertyTest extends LuceneTestCase {
     Directory iDir = newDirectory();
     Directory tDir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random, iDir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)));
+    RandomIndexWriter w = new RandomIndexWriter(random(), iDir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)));
     DirectoryTaxonomyWriter taxoW = new DirectoryTaxonomyWriter(tDir);
 
     CategoryContainer cc = new CategoryContainer();
diff --git a/modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java b/modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java
index ba654ad2008..dd4ea8b580e 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java
@@ -91,8 +91,8 @@ public class FacetsPayloadProcessorProviderTest extends LuceneTestCase {
   private void buildIndexWithFacets(Directory dir, Directory taxDir, boolean asc) throws IOException {
     IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
-        new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
+        new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
     DirectoryTaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(taxDir);
     for (int i = 1; i <= NUM_DOCS; i++) {
diff --git a/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java b/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java
index 7514143a8f2..c6f9d232ea1 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java
@@ -38,7 +38,7 @@ public class OrdinalPolicyTest extends LuceneTestCase {
     assertFalse("default ordinal policy should not match root", ordinalPolicy
         .shouldAdd(TaxonomyReader.ROOT_ORDINAL));
     for (int i = 0; i < 300; i++) {
-      int ordinal = 1 + random.nextInt(Integer.MAX_VALUE - 1);
+      int ordinal = 1 + random().nextInt(Integer.MAX_VALUE - 1);
       assertTrue("default ordinal policy should match " + ordinal,
           ordinalPolicy.shouldAdd(ordinal));
     }
@@ -53,17 +53,17 @@ public class OrdinalPolicyTest extends LuceneTestCase {
     int[] topLevelOrdinals = new int[10];
     String[] topLevelStrings = new String[10];
     for (int i = 0; i < 10; i++) {
-      topLevelStrings[i] = Integer.valueOf(random.nextInt(30)).toString();
+      topLevelStrings[i] = Integer.valueOf(random().nextInt(30)).toString();
       topLevelOrdinals[i] = taxonomy.addCategory(new CategoryPath(
           topLevelStrings[i]));
     }
     int[] nonTopLevelOrdinals = new int[300];
     for (int i = 0; i < 300; i++) {
-      int nComponents = 2 + random.nextInt(10);
+      int nComponents = 2 + random().nextInt(10);
       String[] components = new String[nComponents];
       components[0] = topLevelStrings[i % 10];
       for (int j = 1; j < components.length; j++) {
-        components[j] = (Integer.valueOf(random.nextInt(30))).toString();
+        components[j] = (Integer.valueOf(random().nextInt(30))).toString();
       }
       nonTopLevelOrdinals[i] = taxonomy.addCategory(new CategoryPath(
           components));
diff --git a/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java b/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java
index df7f9f3a065..0cef03d8c7f 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java
@@ -38,10 +38,10 @@ public class PathPolicyTest extends LuceneTestCase {
     assertFalse("default path policy should not accept root",
         pathPolicy.shouldAdd(cp));
     for (int i = 0; i < 300; i++) {
-      int nComponents = 1 + random.nextInt(10);
+      int nComponents = 1 + random().nextInt(10);
       String[] components = new String[nComponents];
       for (int j = 0; j < components.length; j++) {
-        components[j] = (Integer.valueOf(random.nextInt(30))).toString();
+        components[j] = (Integer.valueOf(random().nextInt(30))).toString();
       }
       cp = new CategoryPath(components);
       assertTrue("default path policy should accept "
@@ -59,18 +59,18 @@ public class PathPolicyTest extends LuceneTestCase {
     CategoryPath[] topLevelPaths = new CategoryPath[10];
     String[] topLevelStrings = new String[10];
     for (int i = 0; i < 10; i++) {
-      topLevelStrings[i] = Integer.valueOf(random.nextInt(30)).toString();
+      topLevelStrings[i] = Integer.valueOf(random().nextInt(30)).toString();
       topLevelPaths[i] = new CategoryPath(topLevelStrings[i]);
       taxonomy.addCategory(topLevelPaths[i]);
     }
     CategoryPath[] nonTopLevelPaths = new CategoryPath[300];
     for (int i = 0; i < 300; i++) {
-      int nComponents = 2 + random.nextInt(10);
+      int nComponents = 2 + random().nextInt(10);
       String[] components = new String[nComponents];
       components[0] = topLevelStrings[i % 10];
       for (int j = 1; j < components.length; j++) {
-        components[j] = (Integer.valueOf(random.nextInt(30))).toString();
+        components[j] = (Integer.valueOf(random().nextInt(30))).toString();
       }
       nonTopLevelPaths[i] = new CategoryPath(components);
       taxonomy.addCategory(nonTopLevelPaths[i]);
diff --git a/modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java b/modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java
index 86d4e2acc7e..849c2b5810e 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java
@@ -92,10 +92,10 @@ public class DefaultFacetIndexingParamsTest extends LuceneTestCase {
         + seed + ")", pathPolicy.shouldAdd(cp), dfip.getPathPolicy()
         .shouldAdd(cp));
     for (int i = 0; i < 30; i++) {
-      int nComponents = random.nextInt(10);
+      int nComponents = random().nextInt(10);
       String[] components = new String[nComponents];
       for (int j = 0; j < components.length; j++) {
-        components[j] = (Integer.valueOf(random.nextInt(30))).toString();
+        components[j] = (Integer.valueOf(random().nextInt(30))).toString();
       }
       cp = new CategoryPath(components);
       assertEquals("path policy does not match default for "
@@ -110,7 +110,7 @@ public class DefaultFacetIndexingParamsTest extends LuceneTestCase {
         .shouldAdd(TaxonomyReader.ROOT_ORDINAL), dfip
         .getOrdinalPolicy().shouldAdd(TaxonomyReader.ROOT_ORDINAL));
     for (int i = 0; i < 30; i++) {
-      int ordinal = random.nextInt();
+      int ordinal = random().nextInt();
       assertEquals("ordinal policy does not match default for " + ordinal
           + "(seed " + seed + ")", ordinalPolicy.shouldAdd(ordinal),
           dfip.getOrdinalPolicy().shouldAdd(ordinal));
diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java b/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java
index 5129da84cb4..3b8336a7ee7 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java
@@ -62,14 +62,14 @@ public abstract class BaseTestTopK extends FacetTestBase {
         return;
       }
       currDoc = doc;
-      nextInt = random.nextInt(categoriesPow2);
+      nextInt = random().nextInt(categoriesPow2);
       nextInt = (int)Math.sqrt(nextInt);
     }
 
     @Override
     protected String getContent(int doc) {
       nextInt(doc);
-      if (random.nextDouble() > 0.1) {
+      if (random().nextDouble() > 0.1) {
         return ALPHA + ' ' + BETA;
       }
       return ALPHA;
@@ -109,6 +109,6 @@ public abstract class BaseTestTopK extends FacetTestBase {
   @Override
   protected IndexWriterConfig getIndexWriterConfig(Analyzer analyzer) {
-    return super.getIndexWriterConfig(analyzer).setMaxBufferedDocs(_TestUtil.nextInt(random, 500, 10000));
+    return super.getIndexWriterConfig(analyzer).setMaxBufferedDocs(_TestUtil.nextInt(random(), 500, 10000));
   }
 }
diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java b/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java
index 9e94f97bcf6..dcfec0906db 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java
@@ -96,8 +96,8 @@ public class CategoryListIteratorTest extends LuceneTestCase {
     Directory dir = newDirectory();
     DataTokenStream dts = new DataTokenStream("1",new SortingIntEncoder(
         new UniqueValuesIntEncoder(new DGapIntEncoder(new VInt8IntEncoder()))));
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
-        new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()));
     for (int i = 0; i < data.length; i++) {
       dts.setIdx(i);
       Document doc = new Document();
@@ -144,7 +144,7 @@ public class CategoryListIteratorTest extends LuceneTestCase {
       }
     };
     // NOTE: test is wired to LogMP... because test relies on certain docids having payloads
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, noPayloadsAnalyzer).setMergePolicy(newLogMergePolicy()));
     for (int i = 0; i < data.length; i++) {
       Document doc = new Document();
diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java b/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java
index beba5e077dc..7f3e9b9a05b 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java
@@ -67,11 +67,12 @@ public class DrillDownTest extends LuceneTestCase {
     nonDefaultParams = new FacetSearchParams(iParams);
   }
+  @BeforeClass
   public static void createIndexes() throws CorruptIndexException, LockObtainFailedException, IOException {
     dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)));
     taxoDir = newDirectory();
     TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java b/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java
index b3927832a7c..838b85c5d2e 100644
--- a/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java
+++ b/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java
@@ -62,8 +62,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase {
 
   public void testDefault() throws
Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(random(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -89,7 +89,7 @@ public class TestMultipleCategoryLists extends LuceneTestCase { // Obtain facets results and hand-test them assertCorrectResults(facetsCollector); - DocsEnum td = _TestUtil.docs(random, ir, "$facets", new BytesRef("$fulltree$"), MultiFields.getLiveDocs(ir), null, false); + DocsEnum td = _TestUtil.docs(random(), ir, "$facets", new BytesRef("$fulltree$"), MultiFields.getLiveDocs(ir), null, false); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); tr.close(); @@ -103,8 +103,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testCustom() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(random(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -143,8 +143,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testTwoCustomsSameField() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(random(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -183,7 +183,7 @@ public class TestMultipleCategoryLists extends LuceneTestCase { } private void assertPostingListExists(String field, String text, IndexReader ir) throws IOException { - DocsEnum de = _TestUtil.docs(random, ir, field, new BytesRef(text), null, null, false); + DocsEnum de = _TestUtil.docs(random(), ir, field, new BytesRef(text), null, null, false); assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); } @@ -191,8 +191,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testDifferentFieldsAndText() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(random(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -231,8 +231,8 @@ public class TestMultipleCategoryLists extends 
LuceneTestCase { public void testSomeSameSomeDifferent() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(random(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java b/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java index 5fbb06a5f29..6a7eeb966aa 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java +++ b/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java @@ -77,9 +77,9 @@ public class TestTopKInEachNodeResultHandler extends LuceneTestCase { } }; - RandomIndexWriter iw = new RandomIndexWriter(random, iDir, + RandomIndexWriter iw = new RandomIndexWriter(random(), iDir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); TaxonomyWriter tw = new DirectoryTaxonomyWriter(tDir); prvt_add(iParams, iw, tw, "a", "b"); prvt_add(iParams, iw, tw, "a", "b", "1"); diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java b/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java index a3299e2d819..6b2800d285a 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java +++ b/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java @@ -114,9 +114,9 @@ public class TestTotalFacetCountsCache extends LuceneTestCase { public void testGeneralSynchronization() throws Exception { int numIters = atLeast(2); for (int i = 0; i < numIters; i++) { - doTestGeneralSynchronization(_TestUtil.nextInt(random, 2, 4), - random.nextBoolean() ? -1 : _TestUtil.nextInt(random, 1, 10), - _TestUtil.nextInt(random, 0, 3)); + doTestGeneralSynchronization(_TestUtil.nextInt(random(), 2, 4), + random().nextBoolean() ? 
-1 : _TestUtil.nextInt(random(), 1, 10), + _TestUtil.nextInt(random(), 0, 3)); } } @@ -143,10 +143,10 @@ public class TestTotalFacetCountsCache extends LuceneTestCase { int cacheSize) throws Exception, CorruptIndexException, IOException, InterruptedException { TFC.setCacheSize(cacheSize); - SlowRAMDirectory slowIndexDir = new SlowRAMDirectory(-1, random); - MockDirectoryWrapper indexDir = new MockDirectoryWrapper(random, slowIndexDir); - SlowRAMDirectory slowTaxoDir = new SlowRAMDirectory(-1, random); - MockDirectoryWrapper taxoDir = new MockDirectoryWrapper(random, slowTaxoDir); + SlowRAMDirectory slowIndexDir = new SlowRAMDirectory(-1, random()); + MockDirectoryWrapper indexDir = new MockDirectoryWrapper(random(), slowIndexDir); + SlowRAMDirectory slowTaxoDir = new SlowRAMDirectory(-1, random()); + MockDirectoryWrapper taxoDir = new MockDirectoryWrapper(random(), slowTaxoDir); // Index documents without the "slowness" @@ -408,7 +408,7 @@ public class TestTotalFacetCountsCache extends LuceneTestCase { // Write index using 'normal' directories IndexWriter w = new IndexWriter(indexDir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir); DefaultFacetIndexingParams iParams = new DefaultFacetIndexingParams(); // Add documents and facets diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java b/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java index 08d63d264aa..978236ede95 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java +++ b/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java @@ -66,8 +66,8 @@ public class AssociationsFacetRequestTest extends LuceneTestCase { dir = newDirectory(); taxoDir = newDirectory(); // preparations - index, taxonomy, content - RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.KEYWORD, false))); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, + new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))); TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir); diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java b/modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java index 3689d04c707..679c924a973 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java +++ b/modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java @@ -166,8 +166,8 @@ public class MultiIteratorsPerCLParamsTest extends LuceneTestCase { private void populateIndex(FacetIndexingParams iParams, Directory indexDir, Directory taxoDir) throws Exception { - RandomIndexWriter writer = new RandomIndexWriter(random, indexDir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexDir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))); TaxonomyWriter taxoWriter = new 
DirectoryTaxonomyWriter(taxoDir); for (CategoryPath[] categories : perDocCategories) { diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java b/modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java index 3554dd91ec8..22defc11067 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java +++ b/modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java @@ -54,7 +54,7 @@ public abstract class BaseSampleTestTopK extends BaseTestTopK { * is performed. The results are compared to non-sampled ones. */ public void testCountUsingSamping() throws Exception, IOException { - boolean useRandomSampler = random.nextBoolean(); + boolean useRandomSampler = random().nextBoolean(); for (int partitionSize : partitionSizes) { try { initIndex(partitionSize); @@ -132,7 +132,7 @@ public abstract class BaseSampleTestTopK extends BaseTestTopK { samplingParams.setSampingThreshold(11000); //force sampling Sampler sampler = useRandomSampler ? - new RandomSampler(samplingParams, new Random(random.nextLong())) : + new RandomSampler(samplingParams, new Random(random().nextLong())) : new RepeatableSampler(samplingParams); assertTrue("must enable sampling for this test!",sampler.shouldSample(scoredDocIDs)); return sampler; diff --git a/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java b/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java index a25cd4cc293..4978a2efe77 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java +++ b/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java @@ -105,10 +105,10 @@ public class TestAddTaxonomies extends LuceneTestCase { public void testmedium() throws Exception { int numTests = atLeast(3); for (int i = 0; i < numTests; i++) { - dotest(_TestUtil.nextInt(random, 1, 10), - _TestUtil.nextInt(random, 1, 100), - _TestUtil.nextInt(random, 100, 1000), - random.nextBoolean()); + dotest(_TestUtil.nextInt(random(), 1, 10), + _TestUtil.nextInt(random(), 1, 100), + _TestUtil.nextInt(random(), 100, 1000), + random().nextBoolean()); } } @@ -135,7 +135,7 @@ public class TestAddTaxonomies extends LuceneTestCase { DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[i]); DirectoryTaxonomyWriter copytw = new DirectoryTaxonomyWriter(copydirs[i]); for (int j=0; j set2 = new HashSet(); for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 500; - boolean shouldAdd = random.nextBoolean(); + int value = random().nextInt() % 500; + boolean shouldAdd = random().nextBoolean(); if (shouldAdd) { set1.add(value); set2.add(value); @@ -133,8 +133,8 @@ public class IntHashSetTest extends LuceneTestCase { HashSet set = new HashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = random().nextInt() % 5000; + boolean shouldAdd = random().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -150,8 +150,8 @@ public class IntHashSetTest extends LuceneTestCase { IntHashSet set = new IntHashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = 
random().nextInt() % 5000; + boolean shouldAdd = random().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -167,8 +167,8 @@ public class IntHashSetTest extends LuceneTestCase { IntHashSet set = new IntHashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = random().nextInt() % 5000; + boolean shouldAdd = random().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -195,8 +195,8 @@ public class IntHashSetTest extends LuceneTestCase { HashSet set = new HashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = random().nextInt() % 5000; + boolean shouldAdd = random().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -212,8 +212,8 @@ public class IntHashSetTest extends LuceneTestCase { IntHashSet set = new IntHashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = random().nextInt() % 5000; + boolean shouldAdd = random().nextBoolean(); if (shouldAdd) { set.add(value); } else { diff --git a/modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java b/modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java index b2e28fa7ef3..8b9bf5c9e06 100644 --- a/modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java +++ b/modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java @@ -208,7 +208,7 @@ public class IntToDoubleMapTest extends LuceneTestCase { IntToDoubleMap map = new IntToDoubleMap(); int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = random().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git a/modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java b/modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java index a61e64ce269..88d66d74c8e 100644 --- a/modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java +++ b/modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java @@ -208,7 +208,7 @@ public class IntToIntMapTest extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = random().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git a/modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java b/modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java index a1cb69b3205..535275dd6aa 100644 --- a/modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java +++ b/modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java @@ -205,7 +205,7 @@ public class IntToObjectMapTest extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = random().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git 
a/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java b/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java index faeb8ef15ef..684f2d69fcf 100644 --- a/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java +++ b/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java @@ -215,7 +215,7 @@ public class ObjectToFloatMapTest extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = random().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git a/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java b/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java index d2a7ff2f298..407d0854f08 100644 --- a/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java +++ b/modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java @@ -215,7 +215,7 @@ public class ObjectToIntMapTest extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = random().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java index 900e503f0db..b1040164c67 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java @@ -31,7 +31,7 @@ public abstract class AbstractGroupingTestCase extends LuceneTestCase { do { // B/c of DV based impl we can't see the difference between an empty string and a null value. // For that reason we don't generate empty string groups. 
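// Illustration (not a file in this patch; ExampleRandomTest is hypothetical):
// every hunk in this patch makes the same mechanical change -- reads of the
// shared static field LuceneTestCase.random become calls to the per-test
// accessor random(), which returns the Random bound to the currently running
// test. A minimal sketch of the before/after:
import java.util.Random;
import org.apache.lucene.util.LuceneTestCase;

public class ExampleRandomTest extends LuceneTestCase {
  public void testBoundedDraw() {
    Random r = random();          // after this patch: per-test accessor
    int n = 1 + r.nextInt(9);     // before: 1 + random.nextInt(9) on the static field
    assertTrue("draw out of range: " + n, n >= 1 && n <= 9);
  }
}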
- randomValue = _TestUtil.randomRealisticUnicodeString(random); + randomValue = _TestUtil.randomRealisticUnicodeString(random()); } while ("".equals(randomValue)); return randomValue; } diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java index 448e5613e9e..e94a84f0434 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java @@ -50,12 +50,12 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { final String groupField = "author"; Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); - Type valueType = vts[random.nextInt(vts.length)]; + Type valueType = vts[random().nextInt(vts.length)]; // 0 Document doc = new Document(); @@ -156,14 +156,14 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { } public void testRandom() throws Exception { - int numberOfRuns = _TestUtil.nextInt(random, 3, 6); + int numberOfRuns = _TestUtil.nextInt(random(), 3, 6); for (int iter = 0; iter < numberOfRuns; iter++) { if (VERBOSE) { System.out.println(String.format("TEST: iter=%d total=%d", iter, numberOfRuns)); } - final int numDocs = _TestUtil.nextInt(random, 100, 1000) * RANDOM_MULTIPLIER; - final int numGroups = _TestUtil.nextInt(random, 1, numDocs); + final int numDocs = _TestUtil.nextInt(random(), 100, 1000) * RANDOM_MULTIPLIER; + final int numGroups = _TestUtil.nextInt(random(), 1, numDocs); if (VERBOSE) { System.out.println("TEST: numDocs=" + numDocs + " numGroups=" + numGroups); @@ -175,18 +175,18 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { do { // B/c of DV based impl we can't see the difference between an empty string and a null value. // For that reason we don't generate empty string groups. 
- randomValue = _TestUtil.randomRealisticUnicodeString(random); + randomValue = _TestUtil.randomRealisticUnicodeString(random()); } while ("".equals(randomValue)); groups.add(new BytesRef(randomValue)); } - final String[] contentStrings = new String[_TestUtil.nextInt(random, 2, 20)]; + final String[] contentStrings = new String[_TestUtil.nextInt(random(), 2, 20)]; if (VERBOSE) { System.out.println("TEST: create fake content"); } for (int contentIDX = 0; contentIDX < contentStrings.length; contentIDX++) { final StringBuilder sb = new StringBuilder(); - sb.append("real").append(random.nextInt(3)).append(' '); - final int fakeCount = random.nextInt(10); + sb.append("real").append(random().nextInt(3)).append(' '); + final int fakeCount = random().nextInt(10); for (int fakeIDX = 0; fakeIDX < fakeCount; fakeIDX++) { sb.append("fake "); } @@ -198,13 +198,13 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random))); + new MockAnalyzer(random()))); boolean preFlex = "Lucene3x".equals(w.w.getConfig().getCodec().getName()); boolean canUseIDV = !preFlex; - Type valueType = vts[random.nextInt(vts.length)]; + Type valueType = vts[random().nextInt(vts.length)]; Document doc = new Document(); Document docNoGroup = new Document(); @@ -233,21 +233,21 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { final GroupDoc[] groupDocs = new GroupDoc[numDocs]; for (int i = 0; i < numDocs; i++) { final BytesRef groupValue; - if (random.nextInt(24) == 17) { + if (random().nextInt(24) == 17) { // So we test the "doc doesn't have the group'd // field" case: groupValue = null; } else { - groupValue = groups.get(random.nextInt(groups.size())); + groupValue = groups.get(random().nextInt(groups.size())); } final GroupDoc groupDoc = new GroupDoc( i, groupValue, - groups.get(random.nextInt(groups.size())), - groups.get(random.nextInt(groups.size())), + groups.get(random().nextInt(groups.size())), + groups.get(random().nextInt(groups.size())), new BytesRef(String.format("%05d", i)), - contentStrings[random.nextInt(contentStrings.length)] + contentStrings[random().nextInt(contentStrings.length)] ); if (VERBOSE) { @@ -313,8 +313,8 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { System.out.println("TEST: searchIter=" + searchIter); } - final String searchTerm = "real" + random.nextInt(3); - boolean sortByScoreOnly = random.nextBoolean(); + final String searchTerm = "real" + random().nextInt(3); + boolean sortByScoreOnly = random().nextBoolean(); Sort sortWithinGroup = getRandomSort(sortByScoreOnly); AbstractAllGroupHeadsCollector allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup, canUseIDV, valueType); s.search(new TermQuery(new Term("content", searchTerm)), allGroupHeadsCollector); @@ -447,22 +447,22 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { private Sort getRandomSort(boolean scoreOnly) { final List sortFields = new ArrayList(); - if (random.nextInt(7) == 2 || scoreOnly) { + if (random().nextInt(7) == 2 || scoreOnly) { sortFields.add(SortField.FIELD_SCORE); } else { - if (random.nextBoolean()) { - if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); + if (random().nextBoolean()) { + if (random().nextBoolean()) { + sortFields.add(new SortField("sort1", SortField.Type.STRING, random().nextBoolean())); } 
else { - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, random().nextBoolean())); } - } else if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + } else if (random().nextBoolean()) { + sortFields.add(new SortField("sort1", SortField.Type.STRING, random().nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, random().nextBoolean())); } } // Break ties: - if (random.nextBoolean() && !scoreOnly) { + if (random().nextBoolean() && !scoreOnly) { sortFields.add(new SortField("sort3", SortField.Type.STRING)); } else if (!scoreOnly) { sortFields.add(new SortField("id", SortField.Type.INT)); @@ -509,11 +509,11 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { @SuppressWarnings({"unchecked","rawtypes"}) private AbstractAllGroupHeadsCollector createRandomCollector(String groupField, Sort sortWithinGroup, boolean canUseIDV, Type valueType) throws IOException { AbstractAllGroupHeadsCollector collector; - if (random.nextBoolean()) { + if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); collector = new FunctionAllGroupHeadsCollector(vs, new HashMap(), sortWithinGroup); - } else if (canUseIDV && random.nextBoolean()) { - boolean diskResident = random.nextBoolean(); + } else if (canUseIDV && random().nextBoolean()) { + boolean diskResident = random().nextBoolean(); collector = DVAllGroupHeadsCollector.create(groupField, sortWithinGroup, valueType, diskResident); } else { collector = TermAllGroupHeadsCollector.create(groupField, sortWithinGroup); diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java index a723cf62374..27a8f68a311 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java @@ -46,10 +46,10 @@ public class AllGroupsCollectorTest extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); // 0 @@ -129,10 +129,10 @@ public class AllGroupsCollectorTest extends LuceneTestCase { private AbstractAllGroupsCollector createRandomCollector(String groupField, boolean canUseIDV) throws IOException { AbstractAllGroupsCollector selected; - if (random.nextBoolean() && canUseIDV) { - boolean diskResident = random.nextBoolean(); + if (random().nextBoolean() && canUseIDV) { + boolean diskResident = random().nextBoolean(); selected = DVAllGroupsCollector.create(groupField, Type.BYTES_VAR_SORTED, diskResident); - } else if (random.nextBoolean()) { + } else if (random().nextBoolean()) { selected = new TermAllGroupsCollector(groupField); } else { ValueSource vs = new BytesRefFieldSource(groupField); diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java index 
a7db199e87c..378ab944e93 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java @@ -48,6 +48,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase { private final String countField = "publisher"; public void testSimple() throws Exception { + Random random = random(); DocValues.Type[] dvTypes = new DocValues.Type[]{ DocValues.Type.VAR_INTS, DocValues.Type.FLOAT_64, @@ -223,6 +224,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase { } public void testRandom() throws Exception { + Random random = random(); int numberOfRuns = _TestUtil.nextInt(random, 3, 6); for (int indexIter = 0; indexIter < numberOfRuns; indexIter++) { IndexContext context = createIndexContext(); @@ -340,6 +342,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase { String groupField, String countField, DocValues.Type dvType) { + Random random = random(); Collection> searchGroups = firstPassGroupingCollector.getTopGroups(0, false); if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { boolean diskResident = random.nextBoolean(); @@ -353,6 +356,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase { @SuppressWarnings({"unchecked","rawtypes"}) private AbstractFirstPassGroupingCollector createRandomFirstPassCollector(DocValues.Type dvType, Sort groupSort, String groupField, int topNGroups) throws IOException { + Random random = random(); if (dvType != null) { if (random.nextBoolean()) { boolean diskResident = random.nextBoolean(); @@ -397,6 +401,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase { } private IndexContext createIndexContext() throws Exception { + Random random = random(); DocValues.Type[] dvTypes = new DocValues.Type[]{ DocValues.Type.BYTES_VAR_STRAIGHT, DocValues.Type.BYTES_VAR_SORTED diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java index f1cc06521bf..0209c1f8fbc 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java @@ -44,12 +44,12 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); boolean canUseDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); - boolean useDv = canUseDV && random.nextBoolean(); + boolean useDv = canUseDV && random().nextBoolean(); // 0 Document doc = new Document(); @@ -225,6 +225,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase { } public void testRandom() throws Exception { + Random random = random(); int numberOfRuns = _TestUtil.nextInt(random, 3, 6); for (int indexIter = 0; indexIter < numberOfRuns; indexIter++) { boolean multipleFacetsPerDocument = random.nextBoolean(); @@ -322,6 +323,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase { } private IndexContext createIndexContext(boolean multipleFacetValuesPerDocument) throws 
IOException { + final Random random = random(); final int numDocs = _TestUtil.nextInt(random, 138, 1145) * RANDOM_MULTIPLIER; final int numGroups = _TestUtil.nextInt(random, 1, numDocs / 4); final int numFacets = _TestUtil.nextInt(random, 1, numDocs / 6); @@ -574,9 +576,9 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase { BytesRef facetPrefixBR = facetPrefix == null ? null : new BytesRef(facetPrefix); if (useDv) { return DVGroupFacetCollector.createDvGroupFacetCollector(groupField, DocValues.Type.BYTES_VAR_SORTED, - random.nextBoolean(), facetField, DocValues.Type.BYTES_VAR_SORTED, random.nextBoolean(), facetPrefixBR, random.nextInt(1024)); + random().nextBoolean(), facetField, DocValues.Type.BYTES_VAR_SORTED, random().nextBoolean(), facetPrefixBR, random().nextInt(1024)); } else { - return TermGroupFacetCollector.createTermGroupFacetCollector(groupField, facetField, multipleFacetsPerDocument, facetPrefixBR, random.nextInt(1024)); + return TermGroupFacetCollector.createTermGroupFacetCollector(groupField, facetField, multipleFacetsPerDocument, facetPrefixBR, random().nextInt(1024)); } } diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java index 74bf47eb299..9058bf89893 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java @@ -47,10 +47,10 @@ public class GroupingSearchTest extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); List documents = new ArrayList(); // 0 @@ -197,12 +197,12 @@ public class GroupingSearchTest extends LuceneTestCase { private GroupingSearch createRandomGroupingSearch(String groupField, Sort groupSort, int docsInGroup, boolean canUseIDV) throws IOException { GroupingSearch groupingSearch; - if (random.nextBoolean()) { + if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); groupingSearch = new GroupingSearch(vs, new HashMap()); } else { - if (canUseIDV && random.nextBoolean()) { - boolean diskResident = random.nextBoolean(); + if (canUseIDV && random().nextBoolean()) { + boolean diskResident = random().nextBoolean(); groupingSearch = new GroupingSearch(groupField, DocValues.Type.BYTES_VAR_SORTED, diskResident); } else { groupingSearch = new GroupingSearch(groupField); @@ -212,7 +212,7 @@ public class GroupingSearchTest extends LuceneTestCase { groupingSearch.setGroupSort(groupSort); groupingSearch.setGroupDocsLimit(docsInGroup); - if (random.nextBoolean()) { + if (random().nextBoolean()) { groupingSearch.setCachingInMB(4.0, true); } diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index a72247a75af..a54602c2ea8 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -69,10 +69,10 @@ public class TestGrouping extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new 
RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); // 0 Document doc = new Document(); @@ -181,10 +181,10 @@ public class TestGrouping extends LuceneTestCase { private AbstractFirstPassGroupingCollector createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs, boolean canUseIDV) throws IOException { AbstractFirstPassGroupingCollector selected; - if (canUseIDV && random.nextBoolean()) { - boolean diskResident = random.nextBoolean(); + if (canUseIDV && random().nextBoolean()) { + boolean diskResident = random().nextBoolean(); selected = DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, Type.BYTES_VAR_SORTED, diskResident); - } else if (random.nextBoolean()) { + } else if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); selected = new FunctionFirstPassGroupingCollector(vs, new HashMap(), groupSort, topDocs); } else { @@ -198,7 +198,7 @@ public class TestGrouping extends LuceneTestCase { private AbstractFirstPassGroupingCollector createFirstPassCollector(String groupField, Sort groupSort, int topDocs, AbstractFirstPassGroupingCollector firstPassGroupingCollector) throws IOException { if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = random().nextBoolean(); return DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, Type.BYTES_VAR_SORTED, diskResident); } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { ValueSource vs = new BytesRefFieldSource(groupField); @@ -220,7 +220,7 @@ public class TestGrouping extends LuceneTestCase { boolean fillSortFields) throws IOException { if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = random().nextBoolean(); Collection> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields); return DVSecondPassGroupingCollector.create(groupField, diskResident, Type.BYTES_VAR_SORTED, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { @@ -245,7 +245,7 @@ public class TestGrouping extends LuceneTestCase { boolean getMaxScores, boolean fillSortFields) throws IOException { if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = random().nextBoolean(); return DVSecondPassGroupingCollector.create(groupField, diskResident, Type.BYTES_VAR_SORTED, (Collection) searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); } else if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) { return new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields); @@ -275,7 +275,7 @@ public class TestGrouping extends LuceneTestCase { if 
(firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) { return new TermAllGroupsCollector(groupField); } else if (firstPassGroupingCollector.getClass().isAssignableFrom(DVFirstPassGroupingCollector.class)) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = random().nextBoolean(); return DVAllGroupsCollector.create(groupField, Type.BYTES_VAR_SORTED, diskResident); } else { ValueSource vs = new BytesRefFieldSource(groupField); @@ -372,18 +372,18 @@ public class TestGrouping extends LuceneTestCase { private Sort getRandomSort() { final List sortFields = new ArrayList(); - if (random.nextInt(7) == 2) { + if (random().nextInt(7) == 2) { sortFields.add(SortField.FIELD_SCORE); } else { - if (random.nextBoolean()) { - if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); + if (random().nextBoolean()) { + if (random().nextBoolean()) { + sortFields.add(new SortField("sort1", SortField.Type.STRING, random().nextBoolean())); } else { - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, random().nextBoolean())); } - } else if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + } else if (random().nextBoolean()) { + sortFields.add(new SortField("sort1", SortField.Type.STRING, random().nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, random().nextBoolean())); } } // Break ties: @@ -560,7 +560,7 @@ public class TestGrouping extends LuceneTestCase { private DirectoryReader getDocBlockReader(Directory dir, GroupDoc[] groupDocs) throws IOException { // Coalesce by group, but in random order: - Collections.shuffle(Arrays.asList(groupDocs), random); + Collections.shuffle(Arrays.asList(groupDocs), random()); final Map> groupMap = new HashMap>(); final List groupValues = new ArrayList(); @@ -573,10 +573,10 @@ public class TestGrouping extends LuceneTestCase { } RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random))); + new MockAnalyzer(random()))); final List> updateDocs = new ArrayList>(); @@ -605,7 +605,7 @@ public class TestGrouping extends LuceneTestCase { docs.get(docs.size()-1).add(groupEnd); // Add as a doc block: w.addDocuments(docs); - if (group != null && random.nextInt(7) == 4) { + if (group != null && random().nextInt(7) == 4) { updateDocs.add(docs); } } @@ -652,16 +652,16 @@ public class TestGrouping extends LuceneTestCase { } public void testRandom() throws Exception { - int numberOfRuns = _TestUtil.nextInt(random, 3, 6); + int numberOfRuns = _TestUtil.nextInt(random(), 3, 6); for (int iter=0; iter docs = new ArrayList(); docs.add(makeJob("java", 2007)); docs.add(makeJob("python", 2010)); - Collections.shuffle(docs, random); + Collections.shuffle(docs, random()); docs.add(makeResume("Lisa", "United Kingdom")); final List docs2 = new ArrayList(); docs2.add(makeJob("ruby", 2005)); docs2.add(makeJob("java", 2006)); - Collections.shuffle(docs2, random); + Collections.shuffle(docs2, random()); docs2.add(makeResume("Frank", "United States")); addSkillless(w); - boolean turn = random.nextBoolean(); + boolean turn = random().nextBoolean(); w.addDocuments(turn ? 
docs:docs2); addSkillless(w); @@ -223,15 +223,15 @@ public class TestBlockJoin extends LuceneTestCase { TermQuery us = new TermQuery(new Term("country", "United States")); assertEquals("@ US we have java and ruby", 2, s.search(new ToChildBlockJoinQuery(us, - parentsFilter, random.nextBoolean()), 10).totalHits ); + parentsFilter, random().nextBoolean()), 10).totalHits ); - assertEquals("java skills in US", 1, s.search(new ToChildBlockJoinQuery(us, parentsFilter, random.nextBoolean()), + assertEquals("java skills in US", 1, s.search(new ToChildBlockJoinQuery(us, parentsFilter, random().nextBoolean()), skill("java"), 10).totalHits ); BooleanQuery rubyPython = new BooleanQuery(); rubyPython.add(new TermQuery(new Term("skill", "ruby")), Occur.SHOULD); rubyPython.add(new TermQuery(new Term("skill", "python")), Occur.SHOULD); - assertEquals("ruby skills in US", 1, s.search(new ToChildBlockJoinQuery(us, parentsFilter, random.nextBoolean()), + assertEquals("ruby skills in US", 1, s.search(new ToChildBlockJoinQuery(us, parentsFilter, random().nextBoolean()), new QueryWrapperFilter(rubyPython), 10).totalHits ); r.close(); @@ -239,8 +239,8 @@ public class TestBlockJoin extends LuceneTestCase { } private void addSkillless(final RandomIndexWriter w) throws IOException { - if (random.nextBoolean()) { - w.addDocument(makeResume("Skillless", random.nextBoolean() ? "United Kingdom":"United States")); + if (random().nextBoolean()) { + w.addDocument(makeResume("Skillless", random().nextBoolean() ? "United Kingdom":"United States")); } } @@ -254,7 +254,7 @@ public class TestBlockJoin extends LuceneTestCase { public void testBoostBug() throws Exception { final Directory dir = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, dir); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir); IndexReader r = w.getReader(); w.close(); IndexSearcher s = newSearcher(r); @@ -271,18 +271,18 @@ public class TestBlockJoin extends LuceneTestCase { private String[][] getRandomFields(int maxUniqueValues) { - final String[][] fields = new String[_TestUtil.nextInt(random, 2, 4)][]; + final String[][] fields = new String[_TestUtil.nextInt(random(), 2, 4)][]; for(int fieldID=0;fieldID toDelete = new ArrayList(); // TODO: parallel star join, nested join cases too! 
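// Illustration (not a file in this patch; DerivedRandomTest is hypothetical):
// where a test hands randomness to a helper -- e.g. the RandomSampler in
// BaseSampleTestTopK, or the "repeatable random sequence" blocks in the
// collections tests earlier in this patch -- a child Random is seeded from a
// single draw on the per-test Random instead of being shared directly, so the
// whole run stays replayable from the master seed. A hedged sketch:
import java.util.Random;
import org.apache.lucene.util.LuceneTestCase;

public class DerivedRandomTest extends LuceneTestCase {
  public void testChildRandomIsReplayable() {
    long seed = random().nextLong();  // one draw from the per-test Random
    Random child = new Random(seed);  // independent stream, replayable via 'seed'
    assertEquals(new Random(seed).nextInt(), child.nextInt());  // same seed, same sequence
  }
}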
- final RandomIndexWriter w = new RandomIndexWriter(random, dir); - final RandomIndexWriter joinW = new RandomIndexWriter(random, joinDir); + final RandomIndexWriter w = new RandomIndexWriter(random(), dir); + final RandomIndexWriter joinW = new RandomIndexWriter(random(), joinDir); for(int parentDocID=0;parentDocID joinResults = c.getTopGroups(childJoinQuery, childSort, 0, hitsPerGroup, 0, true); @@ -641,27 +641,27 @@ public class TestBlockJoin extends LuceneTestCase { // Get random query against parent documents: final Query parentQuery2; - if (random.nextInt(3) == 2) { - final int fieldID = random.nextInt(parentFields.length); + if (random().nextInt(3) == 2) { + final int fieldID = random().nextInt(parentFields.length); parentQuery2 = new TermQuery(new Term("parent" + fieldID, - parentFields[fieldID][random.nextInt(parentFields[fieldID].length)])); - } else if (random.nextInt(3) == 2) { + parentFields[fieldID][random().nextInt(parentFields[fieldID].length)])); + } else if (random().nextInt(3) == 2) { BooleanQuery bq = new BooleanQuery(); parentQuery2 = bq; - final int numClauses = _TestUtil.nextInt(random, 2, 4); + final int numClauses = _TestUtil.nextInt(random(), 2, 4); boolean didMust = false; for(int clauseIDX=0;clauseIDX docs = new ArrayList(); @@ -939,7 +939,7 @@ public class TestBlockJoin extends LuceneTestCase { public void testAdvanceSingleParentSingleChild() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document childDoc = new Document(); childDoc.add(newField("child", "1", StringField.TYPE_UNSTORED)); Document parentDoc = new Document(); @@ -963,7 +963,7 @@ public class TestBlockJoin extends LuceneTestCase { public void testAdvanceSingleParentNoChild() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(new LogDocMergePolicy())); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy())); Document parentDoc = new Document(); parentDoc.add(newField("parent", "1", StringField.TYPE_UNSTORED)); parentDoc.add(newField("isparent", "yes", StringField.TYPE_UNSTORED)); diff --git a/modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java index 1b1a040bde2..8040ded1176 100644 --- a/modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java +++ b/modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java @@ -42,10 +42,10 @@ public class TestJoinUtil extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); // 0 Document doc = new Document(); @@ -120,16 +120,16 @@ public class TestJoinUtil extends LuceneTestCase { @Test public void testSingleValueRandomJoin() throws Exception { - int maxIndexIter = _TestUtil.nextInt(random, 6, 12); - int maxSearchIter = _TestUtil.nextInt(random, 13, 26); + int maxIndexIter = _TestUtil.nextInt(random(), 6, 12); + int maxSearchIter = _TestUtil.nextInt(random(), 13, 26); executeRandomJoin(false, maxIndexIter, maxSearchIter); } @Test 
// This test really takes more time, that is why the number of iterations are smaller. public void testMultiValueRandomJoin() throws Exception { - int maxIndexIter = _TestUtil.nextInt(random, 3, 6); - int maxSearchIter = _TestUtil.nextInt(random, 6, 12); + int maxIndexIter = _TestUtil.nextInt(random(), 3, 6); + int maxSearchIter = _TestUtil.nextInt(random(), 6, 12); executeRandomJoin(true, maxIndexIter, maxSearchIter); } @@ -140,11 +140,11 @@ public class TestJoinUtil extends LuceneTestCase { } Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + random(), dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()) + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()) ); - int numberOfDocumentsToIndex = _TestUtil.nextInt(random, 87, 764); + int numberOfDocumentsToIndex = _TestUtil.nextInt(random(), 87, 764); IndexIterationContext context = createContext(numberOfDocumentsToIndex, w, multipleValuesPerDocument); IndexReader topLevelReader = w.getReader(); @@ -155,7 +155,7 @@ public class TestJoinUtil extends LuceneTestCase { } IndexSearcher indexSearcher = newSearcher(topLevelReader); - int r = random.nextInt(context.randomUniqueValues.length); + int r = random().nextInt(context.randomUniqueValues.length); boolean from = context.randomFrom[r]; String randomValue = context.randomUniqueValues[r]; FixedBitSet expectedResult = createExpectedResult(randomValue, from, indexSearcher.getIndexReader(), context); @@ -229,28 +229,28 @@ public class TestJoinUtil extends LuceneTestCase { for (int i = 0; i < numRandomValues; i++) { String uniqueRandomValue; do { - uniqueRandomValue = _TestUtil.randomRealisticUnicodeString(random); + uniqueRandomValue = _TestUtil.randomRealisticUnicodeString(random()); // uniqueRandomValue = _TestUtil.randomSimpleString(random); } while ("".equals(uniqueRandomValue) || trackSet.contains(uniqueRandomValue)); // Generate unique values and empty strings aren't allowed. trackSet.add(uniqueRandomValue); - context.randomFrom[i] = random.nextBoolean(); + context.randomFrom[i] = random().nextBoolean(); context.randomUniqueValues[i] = uniqueRandomValue; } for (int i = 0; i < nDocs; i++) { String id = Integer.toString(i); - int randomI = random.nextInt(context.randomUniqueValues.length); + int randomI = random().nextInt(context.randomUniqueValues.length); String value = context.randomUniqueValues[randomI]; Document document = new Document(); - document.add(newField(random, "id", id, TextField.TYPE_STORED)); - document.add(newField(random, "value", value, TextField.TYPE_STORED)); + document.add(newField(random(), "id", id, TextField.TYPE_STORED)); + document.add(newField(random(), "value", value, TextField.TYPE_STORED)); boolean from = context.randomFrom[randomI]; - int numberOfLinkValues = multipleValuesPerDocument ? 2 + random.nextInt(10) : 1; + int numberOfLinkValues = multipleValuesPerDocument ? 
2 + random().nextInt(10) : 1; RandomDoc doc = new RandomDoc(id, numberOfLinkValues, value); for (int j = 0; j < numberOfLinkValues; j++) { - String linkValue = context.randomUniqueValues[random.nextInt(context.randomUniqueValues.length)]; + String linkValue = context.randomUniqueValues[random().nextInt(context.randomUniqueValues.length)]; doc.linkValues.add(linkValue); if (from) { if (!context.fromDocuments.containsKey(linkValue)) { @@ -262,7 +262,7 @@ public class TestJoinUtil extends LuceneTestCase { context.fromDocuments.get(linkValue).add(doc); context.randomValueFromDocs.get(value).add(doc); - document.add(newField(random, "from", linkValue, TextField.TYPE_STORED)); + document.add(newField(random(), "from", linkValue, TextField.TYPE_STORED)); } else { if (!context.toDocuments.containsKey(linkValue)) { context.toDocuments.put(linkValue, new ArrayList()); @@ -273,7 +273,7 @@ public class TestJoinUtil extends LuceneTestCase { context.toDocuments.get(linkValue).add(doc); context.randomValueToDocs.get(value).add(doc); - document.add(newField(random, "to", linkValue, TextField.TYPE_STORED)); + document.add(newField(random(), "to", linkValue, TextField.TYPE_STORED)); } } @@ -285,7 +285,7 @@ public class TestJoinUtil extends LuceneTestCase { } w.addDocument(document); - if (random.nextInt(10) == 4) { + if (random().nextInt(10) == 4) { w.commit(); } if (VERBOSE) { diff --git a/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java b/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java index 37ca1aed0de..61903db7413 100644 --- a/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java +++ b/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java @@ -48,7 +48,7 @@ public class BooleanFilterTest extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); //Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags addDoc(writer, "admin guest", "010", "20040101", "Y"); diff --git a/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java b/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java index 3614daf3751..78dca3f9c7a 100644 --- a/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java +++ b/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java @@ -55,7 +55,7 @@ public class ChainedFilterTest extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); Calendar cal = new GregorianCalendar(); cal.clear(); cal.setTimeInMillis(1041397200000L); // 2003 January 01 @@ -189,7 +189,7 @@ public class ChainedFilterTest extends LuceneTestCase { public void testWithCachingFilter() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); IndexReader reader = writer.getReader(); writer.close(); diff --git a/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java 
b/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java index 8814b7e708a..7381ca8fe02 100644 --- a/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java +++ b/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java @@ -54,7 +54,7 @@ public class TermsFilterTest extends LuceneTestCase { public void testMissingTerms() throws Exception { String fieldName = "field1"; Directory rd = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, rd); + RandomIndexWriter w = new RandomIndexWriter(random(), rd); for (int i = 0; i < 100; i++) { Document doc = new Document(); int term = i * 10; //terms are units of 10; @@ -90,7 +90,7 @@ public class TermsFilterTest extends LuceneTestCase { public void testMissingField() throws Exception { String fieldName = "field1"; Directory rd1 = newDirectory(); - RandomIndexWriter w1 = new RandomIndexWriter(random, rd1); + RandomIndexWriter w1 = new RandomIndexWriter(random(), rd1); Document doc = new Document(); doc.add(newField(fieldName, "content1", StringField.TYPE_STORED)); w1.addDocument(doc); @@ -99,7 +99,7 @@ public class TermsFilterTest extends LuceneTestCase { fieldName = "field2"; Directory rd2 = newDirectory(); - RandomIndexWriter w2 = new RandomIndexWriter(random, rd2); + RandomIndexWriter w2 = new RandomIndexWriter(random(), rd2); doc = new Document(); doc.add(newField(fieldName, "content2", StringField.TYPE_STORED)); w2.addDocument(doc); diff --git a/modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java b/modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java index 32c8877cdf3..5233aba79e2 100755 --- a/modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java +++ b/modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java @@ -303,11 +303,11 @@ public class TestCustomScoreQuery extends FunctionTestSetup { assertEquals("queries should have same #hits",h1.size(),h4CustomAdd.size()); assertEquals("queries should have same #hits",h1.size(),h5CustomMulAdd.size()); - QueryUtils.check(random, q1, s, rarely()); - QueryUtils.check(random, q2, s, rarely()); - QueryUtils.check(random, q3, s, rarely()); - QueryUtils.check(random, q4, s, rarely()); - QueryUtils.check(random, q5, s, rarely()); + QueryUtils.check(random(), q1, s, rarely()); + QueryUtils.check(random(), q2, s, rarely()); + QueryUtils.check(random(), q3, s, rarely()); + QueryUtils.check(random(), q4, s, rarely()); + QueryUtils.check(random(), q5, s, rarely()); // verify scores ratios for (final Integer doc : h1.keySet()) { diff --git a/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java b/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java index ebfad3de00a..f6d76229700 100644 --- a/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java +++ b/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java @@ -92,12 +92,12 @@ public abstract class FunctionTestSetup extends LuceneTestCase { } // prepare a small index with just a few documents. 
dir = newDirectory(); - anlzr = new MockAnalyzer(random); + anlzr = new MockAnalyzer(random()); IndexWriterConfig iwc = newIndexWriterConfig( TEST_VERSION_CURRENT, anlzr).setMergePolicy(newLogMergePolicy()); if (doMultiSegment) { - iwc.setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 7)); + iwc.setMaxBufferedDocs(_TestUtil.nextInt(random(), 2, 7)); } - RandomIndexWriter iw = new RandomIndexWriter(random, dir, iwc); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); // add docs not exactly in natural ID order, to verify we do check the order of docs by scores int remaining = N_DOCS; boolean done[] = new boolean[N_DOCS]; diff --git a/modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java b/modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java index 3be1b4b9b63..5aea75aa6b7 100755 --- a/modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java +++ b/modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java @@ -84,7 +84,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup { IndexReader r = IndexReader.open(dir); IndexSearcher s = new IndexSearcher(r); log("test: "+ functionQuery); - QueryUtils.check(random, functionQuery,s); + QueryUtils.check(random(), functionQuery,s); ScoreDoc[] h = s.search(functionQuery, null, 1000).scoreDocs; assertEquals("All docs should be matched!",N_DOCS,h.length); String prevID = "ID"+(N_DOCS+1); // greater than all ids of docs in this test diff --git a/modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java b/modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java index 0d33d1a5a43..08909712f5f 100644 --- a/modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java +++ b/modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java @@ -72,7 +72,7 @@ public class TestOrdValues extends FunctionTestSetup { Query q = new FunctionQuery(vs); log("test: " + q); - QueryUtils.check(random, q, s); + QueryUtils.check(random(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; assertEquals("All docs should be matched!", N_DOCS, h.length); String prevID = inOrder diff --git a/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java b/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java index fe71dffd7d9..3635711b0d4 100644 --- a/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java +++ b/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java @@ -45,7 +45,7 @@ public class TestMoreLikeThis extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random(), directory); // Add series of docs with specific information for MoreLikeThis addDoc(writer, "lucene"); @@ -73,7 +73,7 @@ public class TestMoreLikeThis extends LuceneTestCase { Map originalValues = getOriginalValues(); MoreLikeThis mlt = new MoreLikeThis(reader); - mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + mlt.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); mlt.setMinDocFreq(1); mlt.setMinTermFreq(1); mlt.setMinWordLen(1); @@ -107,7 +107,7 @@ public class TestMoreLikeThis extends LuceneTestCase { private Map getOriginalValues() throws IOException { Map originalValues = new HashMap(); 
MoreLikeThis mlt = new MoreLikeThis(reader); - mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + mlt.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); mlt.setMinDocFreq(1); mlt.setMinTermFreq(1); mlt.setMinWordLen(1); @@ -127,7 +127,7 @@ public class TestMoreLikeThis extends LuceneTestCase { // LUCENE-3326 public void testMultiFields() throws Exception { MoreLikeThis mlt = new MoreLikeThis(reader); - mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + mlt.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); mlt.setMinDocFreq(1); mlt.setMinTermFreq(1); mlt.setMinWordLen(1); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java index 80cfff336de..46437909f1a 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java @@ -98,47 +98,45 @@ public class TestAnalyzingQueryParser extends LuceneTestCase { org.apache.lucene.search.Query q = qp.parse(s); return q.toString("field"); } + + final static class FoldingFilter extends TokenFilter { + final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); -} + public FoldingFilter(TokenStream input) { + super(input); + } -final class TestFoldingFilter extends TokenFilter { - final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - - public TestFoldingFilter(TokenStream input) { - super(input); - } - - @Override - public boolean incrementToken() throws IOException { - if (input.incrementToken()) { - char term[] = termAtt.buffer(); - for (int i = 0; i < term.length; i++) - switch(term[i]) { - case 'ü': - term[i] = 'u'; - break; - case 'ö': - term[i] = 'o'; - break; - case 'é': - term[i] = 'e'; - break; - case 'ï': - term[i] = 'i'; - break; - } - return true; - } else { - return false; + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + char term[] = termAtt.buffer(); + for (int i = 0; i < term.length; i++) + switch(term[i]) { + case 'ü': + term[i] = 'u'; + break; + case 'ö': + term[i] = 'o'; + break; + case 'é': + term[i] = 'e'; + break; + case 'ï': + term[i] = 'i'; + break; + } + return true; + } else { + return false; + } } } -} -final class ASCIIAnalyzer extends Analyzer { - - @Override - public TokenStreamComponents createComponents(String fieldName, Reader reader) { - Tokenizer result = new MockTokenizer(reader, MockTokenizer.SIMPLE, true); - return new TokenStreamComponents(result, new TestFoldingFilter(result)); - } -} + final static class ASCIIAnalyzer extends Analyzer { + @Override + public TokenStreamComponents createComponents(String fieldName, Reader reader) { + Tokenizer result = new MockTokenizer(reader, MockTokenizer.SIMPLE, true); + return new TokenStreamComponents(result, new FoldingFilter(result)); + } + } +} \ No newline at end of file diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java index 949e233a823..85314d88b0b 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java +++ 
b/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java @@ -67,7 +67,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testSimple() throws Exception { String[] fields = {"b", "t"}; - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random)); + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random())); Query q = mfqp.parse("one"); assertEquals("b:one t:one", q.toString()); @@ -130,7 +130,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { boosts.put("b", Float.valueOf(5)); boosts.put("t", Float.valueOf(10)); String[] fields = {"b", "t"}; - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random), boosts); + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random()), boosts); //Check for simple @@ -156,24 +156,24 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStaticMethod1() throws ParseException { String[] fields = {"b", "t"}; String[] queries = {"one", "two"}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new MockAnalyzer(random())); assertEquals("b:one t:two", q.toString()); String[] queries2 = {"+one", "+two"}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new MockAnalyzer(random())); assertEquals("(+b:one) (+t:two)", q.toString()); String[] queries3 = {"one", "+two"}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new MockAnalyzer(random())); assertEquals("b:one (+t:two)", q.toString()); String[] queries4 = {"one +more", "+two"}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new MockAnalyzer(random())); assertEquals("(b:one +b:more) (+t:two)", q.toString()); String[] queries5 = {"blah"}; try { - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new MockAnalyzer(random())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -195,15 +195,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStaticMethod2() throws ParseException { String[] fields = {"b", "t"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random())); assertEquals("+b:one -t:one", q.toString()); - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = 
{BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -215,15 +215,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { //int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random));//, fields, flags, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random()));//, fields, flags, new MockAnalyzer(random)); assertEquals("+b:one -t:one", q.toString()); - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -235,12 +235,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] fields = {"f1", "f2", "f3"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random())); assertEquals("+f1:one -f2:two f3:three", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -251,12 +251,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] queries = {"one", "two"}; String[] fields = {"b", "t"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random())); assertEquals("+b:one -t:two", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -278,7 +278,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { } public void testStopWordSearching() throws Exception { - Analyzer 
analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); Directory ramDir = newDirectory(); IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); @@ -302,7 +302,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { * Return empty tokens for field "f1". */ private static class AnalyzerReturningNull extends Analyzer { - MockAnalyzer stdAnalyzer = new MockAnalyzer(random); + MockAnalyzer stdAnalyzer = new MockAnalyzer(random()); public AnalyzerReturningNull() { super(new PerFieldReuseStrategy()); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java index d8e7ef16f36..7d1992e0961 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java @@ -30,7 +30,7 @@ public class TestQueryParser extends QueryParserTestBase { @Override public QueryParser getParser(Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); qp.setDefaultOperator(QueryParserBase.OR_OPERATOR); return qp; diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java index 37109b0f0dc..25c0efb61d3 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java @@ -35,8 +35,8 @@ import org.apache.lucene.util.LuceneTestCase; public class TestComplexPhraseQuery extends LuceneTestCase { Directory rd; - Analyzer analyzer = new MockAnalyzer(random); - + Analyzer analyzer; + DocData docsContent[] = { new DocData("john smith", "1"), new DocData("johathon smith", "2"), new DocData("john percival smith", "3"), @@ -113,6 +113,8 @@ public class TestComplexPhraseQuery extends LuceneTestCase { @Override public void setUp() throws Exception { super.setUp(); + + analyzer = new MockAnalyzer(random()); rd = newDirectory(); IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); for (int i = 0; i < docsContent.length; i++) { diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java index 661d0136cbf..cb52c0af952 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java @@ -44,7 +44,7 @@ public class TestExtendableQueryParser extends QueryParserTestBase { public QueryParser getParser(Analyzer a, Extensions extensions) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); QueryParser qp = extensions == null ? 
new ExtendableQueryParser( TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser( TEST_VERSION_CURRENT, "field", a, extensions); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java index c7755da8717..232fb994559 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java @@ -132,7 +132,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public PrecedenceQueryParser getParser(Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.OR); @@ -178,7 +178,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public Query getQueryDOA(String query, Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); @@ -239,7 +239,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { "+(title:dog title:cat) -author:\"bob dole\""); PrecedenceQueryParser qp = new PrecedenceQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random)); + qp.setAnalyzer(new MockAnalyzer(random())); // make sure OR is the default: assertEquals(StandardQueryConfigHandler.Operator.OR, qp.getDefaultOperator()); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); @@ -253,7 +253,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { } public void testPunct() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a&b", a, "a&b"); assertQueryEquals("a&&b", a, "a&&b"); assertQueryEquals(".NET", a, ".NET"); @@ -273,7 +273,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); - Analyzer a = new MockAnalyzer(random); + Analyzer a = new MockAnalyzer(random()); assertQueryEquals("3", a, "3"); assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2"); assertQueryEquals("term term1 term2", a, "term term1 term2"); @@ -412,7 +412,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { final String defaultField = "default"; final String monthField = "month"; final String hourField = "hour"; - PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer(random)); + PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer(random())); Map fieldMap = new HashMap(); // set a field specific date resolution @@ -474,7 +474,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { } public void testEscaped() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a\\-b:c", a, "a-b:c"); assertQueryEquals("a\\+b:c", a, 
"a+b:c"); @@ -540,7 +540,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public void testBoost() throws Exception { CharacterRunAutomaton stopSet = new CharacterRunAutomaton(BasicAutomata.makeString("on")); - Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true); + Analyzer oneStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(oneStopAnalyzer); @@ -555,7 +555,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { q = qp.parse("\"on\"^1.0", "field"); assertNotNull(q); - q = getParser(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3", + q = getParser(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3", "field"); assertNotNull(q); } @@ -571,7 +571,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public void testBooleanQuery() throws Exception { BooleanQuery.setMaxClauseCount(2); try { - getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("one two three", "field"); + getParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("one two three", "field"); fail("ParseException expected due to too many boolean clauses"); } catch (QueryNodeException expected) { // too many boolean clauses, so ParseException is expected @@ -580,7 +580,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { // LUCENE-792 public void testNOT() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("NOT foo AND bar", a, "-foo +bar"); } @@ -589,7 +589,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { * issue has been corrected. 
*/ public void testPrecedence() throws Exception { - PrecedenceQueryParser parser = getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + PrecedenceQueryParser parser = getParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); Query query1 = parser.parse("A AND B OR C AND D", "field"); Query query2 = parser.parse("(A AND B) OR (C AND D)", "field"); assertEquals(query1, query2); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java index 2e1aeb4e48f..42b2bca61a1 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java @@ -81,7 +81,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { String[] fields = { "b", "t" }; StandardQueryParser mfqp = new StandardQueryParser(); mfqp.setMultiFields(fields); - mfqp.setAnalyzer(new MockAnalyzer(random)); + mfqp.setAnalyzer(new MockAnalyzer(random())); Query q = mfqp.parse("one", null); assertEquals("b:one t:one", q.toString()); @@ -151,7 +151,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { StandardQueryParser mfqp = new StandardQueryParser(); mfqp.setMultiFields(fields); mfqp.setFieldsBoost(boosts); - mfqp.setAnalyzer(new MockAnalyzer(random)); + mfqp.setAnalyzer(new MockAnalyzer(random())); // Check for simple Query q = mfqp.parse("one", null); @@ -179,24 +179,24 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { public void testStaticMethod1() throws QueryNodeException { String[] fields = { "b", "t" }; String[] queries = { "one", "two" }; - Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer(random)); + Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer(random())); assertEquals("b:one t:two", q.toString()); String[] queries2 = { "+one", "+two" }; - q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(random())); assertEquals("(+b:one) (+t:two)", q.toString()); String[] queries3 = { "one", "+two" }; - q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(random())); assertEquals("b:one (+t:two)", q.toString()); String[] queries4 = { "one +more", "+two" }; - q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(random())); assertEquals("(b:one +b:more) (+t:two)", q.toString()); String[] queries5 = { "blah" }; try { - q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer(random())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -220,15 +220,15 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT }; Query q = QueryParserUtil.parse("one", fields, flags, - new MockAnalyzer(random)); + new MockAnalyzer(random())); assertEquals("+b:one -t:one", q.toString()); - q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random)); + q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random())); assertEquals("+(b:one b:two) -(t:one 
t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random)); + q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -241,19 +241,19 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur.MUST_NOT }; StandardQueryParser parser = new StandardQueryParser(); parser.setMultiFields(fields); - parser.setAnalyzer(new MockAnalyzer(random)); + parser.setAnalyzer(new MockAnalyzer(random())); Query q = QueryParserUtil.parse("one", fields, flags, - new MockAnalyzer(random));// , fields, flags, new + new MockAnalyzer(random()));// , fields, flags, new // MockAnalyzer()); assertEquals("+b:one -t:one", q.toString()); - q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random)); + q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random)); + q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -266,13 +266,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD }; Query q = QueryParserUtil.parse(queries, fields, flags, - new MockAnalyzer(random)); + new MockAnalyzer(random())); assertEquals("+f1:one -f2:two f3:three", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; q = QueryParserUtil - .parse(queries, fields, flags2, new MockAnalyzer(random)); + .parse(queries, fields, flags2, new MockAnalyzer(random())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -285,13 +285,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT }; Query q = QueryParserUtil.parse(queries, fields, flags, - new MockAnalyzer(random)); + new MockAnalyzer(random())); assertEquals("+b:one -t:two", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; q = QueryParserUtil - .parse(queries, fields, flags2, new MockAnalyzer(random)); + .parse(queries, fields, flags2, new MockAnalyzer(random())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -317,7 +317,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { } public void testStopWordSearching() throws Exception { - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(random()); Directory ramDir = newDirectory(); IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); @@ -343,7 +343,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { * Return empty tokens for field "f1". 
*/ private static final class AnalyzerReturningNull extends Analyzer { - MockAnalyzer stdAnalyzer = new MockAnalyzer(random); + MockAnalyzer stdAnalyzer = new MockAnalyzer(random()); public AnalyzerReturningNull() { super(new PerFieldReuseStrategy()); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java index bf558ae2aaa..2e7388d5e2e 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java @@ -96,7 +96,7 @@ public class TestNumericQueryParser extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { - ANALYZER = new MockAnalyzer(random); + ANALYZER = new MockAnalyzer(random()); qp = new StandardQueryParser(ANALYZER); @@ -112,10 +112,10 @@ public class TestNumericQueryParser extends LuceneTestCase { } dateFormatSanityCheckPass = true; - LOCALE = randomLocale(random); - TIMEZONE = randomTimeZone(random); - DATE_STYLE = randomDateStyle(random); - TIME_STYLE = randomDateStyle(random); + LOCALE = randomLocale(random()); + TIMEZONE = randomTimeZone(random()); + DATE_STYLE = randomDateStyle(random()); + TIME_STYLE = randomDateStyle(random()); // assumes localized date pattern will have at least year, month, day, // hour, minute @@ -130,7 +130,7 @@ public class TestNumericQueryParser extends LuceneTestCase { DATE_FORMAT = new NumberDateFormat(dateFormat); do { - randomDate = random.nextLong(); + randomDate = random().nextLong(); // prune date value so it doesn't pass in insane values to some // calendars. 
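Every hunk in this patch performs the same substitution: the shared random field inherited from the test base class is replaced by the random() accessor, which hands each test thread its own reproducibly seeded Random. The following is a minimal sketch of what such an accessor can look like; it is illustrative only, and RandomizedTestBase, PER_THREAD and masterSeed are invented names rather than the real LuceneTestCase internals.

import java.util.Random;

public abstract class RandomizedTestBase {
  // One Random per thread, derived from a master seed so that a failing
  // randomized run can be replayed with the same seed.
  private static final ThreadLocal<Random> PER_THREAD =
      ThreadLocal.withInitial(
          () -> new Random(masterSeed() ^ Thread.currentThread().getId()));

  // Replaces the old shared field: tests call random() instead of random.
  public static Random random() {
    return PER_THREAD.get();
  }

  private static long masterSeed() {
    // Hypothetical seed source, present only to keep the sketch runnable;
    // the real test framework derives this from its tests.seed property.
    return Long.getLong("tests.seed", 0xDEADBEEFL);
  }
}

Because such an accessor is only meaningful while a test is actually running, class-level field initializers that consumed the old field can no longer run at construction time; that is why several files in this patch move that initialization into setUp() or @BeforeClass, as the remaining hunks show.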
@@ -154,26 +154,26 @@ public class TestNumericQueryParser extends LuceneTestCase { } while (!dateFormatSanityCheckPass); NUMBER_FORMAT = NumberFormat.getNumberInstance(LOCALE); - NUMBER_FORMAT.setMaximumFractionDigits((random.nextInt() & 20) + 1); - NUMBER_FORMAT.setMinimumFractionDigits((random.nextInt() & 20) + 1); - NUMBER_FORMAT.setMaximumIntegerDigits((random.nextInt() & 20) + 1); - NUMBER_FORMAT.setMinimumIntegerDigits((random.nextInt() & 20) + 1); + NUMBER_FORMAT.setMaximumFractionDigits((random().nextInt() & 20) + 1); + NUMBER_FORMAT.setMinimumFractionDigits((random().nextInt() & 20) + 1); + NUMBER_FORMAT.setMaximumIntegerDigits((random().nextInt() & 20) + 1); + NUMBER_FORMAT.setMinimumIntegerDigits((random().nextInt() & 20) + 1); double randomDouble; long randomLong; int randomInt; float randomFloat; - while ((randomLong = normalizeNumber(Math.abs(random.nextLong())) + while ((randomLong = normalizeNumber(Math.abs(random().nextLong())) .longValue()) == 0L) ; - while ((randomDouble = normalizeNumber(Math.abs(random.nextDouble())) + while ((randomDouble = normalizeNumber(Math.abs(random().nextDouble())) .doubleValue()) == 0.0) ; - while ((randomFloat = normalizeNumber(Math.abs(random.nextFloat())) + while ((randomFloat = normalizeNumber(Math.abs(random().nextFloat())) .floatValue()) == 0.0f) ; - while ((randomInt = normalizeNumber(Math.abs(random.nextInt())).intValue()) == 0) + while ((randomInt = normalizeNumber(Math.abs(random().nextInt())).intValue()) == 0) ; randomNumberMap.put(NumericType.LONG.name(), randomLong); @@ -185,9 +185,9 @@ public class TestNumericQueryParser extends LuceneTestCase { RANDOM_NUMBER_MAP = Collections.unmodifiableMap(randomNumberMap); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)) + RandomIndexWriter writer = new RandomIndexWriter(random(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) + .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000)) .setMergePolicy(newLogMergePolicy())); Document doc = new Document(); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java index 20fbc23e22e..cb330b4babb 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java @@ -195,7 +195,7 @@ public class TestQPHelper extends LuceneTestCase { public StandardQueryParser getParser(Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(a); @@ -285,7 +285,7 @@ public class TestQPHelper extends LuceneTestCase { public Query getQueryDOA(String query, Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); @@ -305,7 +305,7 @@ public class TestQPHelper extends LuceneTestCase { } public void testConstantScoreAutoRewrite() throws Exception { - 
StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); Query q = qp.parse("foo*bar", "field"); assertTrue(q instanceof WildcardQuery); assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((MultiTermQuery) q).getRewriteMethod()); @@ -423,9 +423,9 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("field=a", null, "a"); assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2"); assertQueryEquals("term term term", null, "term term term"); - assertQueryEquals("türm term term", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + assertQueryEquals("türm term term", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), "türm term term"); - assertQueryEquals("ümlaut", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), "ümlaut"); + assertQueryEquals("ümlaut", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), "ümlaut"); // FIXME: change MockAnalyzer to not extend CharTokenizer for this test //assertQueryEquals("\"\"", new KeywordAnalyzer(), ""); @@ -483,7 +483,7 @@ public class TestQPHelper extends LuceneTestCase { } public void testPunct() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a&b", a, "a&b"); assertQueryEquals("a&&b", a, "a&&b"); assertQueryEquals(".NET", a, ".NET"); @@ -504,7 +504,7 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("3", a, "3"); assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2"); assertQueryEquals("term term1 term2", a, "term term1 term2"); @@ -755,7 +755,7 @@ public class TestQPHelper extends LuceneTestCase { } public void testEscaped() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); /* * assertQueryEquals("\\[brackets", a, "\\[brackets"); @@ -854,7 +854,7 @@ public class TestQPHelper extends LuceneTestCase { } public void testQueryStringEscaping() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c"); assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c"); @@ -895,7 +895,7 @@ public class TestQPHelper extends LuceneTestCase { @Ignore("contrib queryparser shouldn't escape wildcard terms") public void testEscapedWildcard() throws Exception { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r")); assertEquals(q, qp.parse("foo\\?ba?r", "field")); @@ -933,7 +933,7 @@ public class TestQPHelper extends LuceneTestCase { public void testBoost() throws Exception { CharacterRunAutomaton stopSet = new CharacterRunAutomaton(BasicAutomata.makeString("on")); - Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, 
true, stopSet, true); + Analyzer oneStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(oneStopAnalyzer); @@ -949,7 +949,7 @@ public class TestQPHelper extends LuceneTestCase { assertNotNull(q); StandardQueryParser qp2 = new StandardQueryParser(); - qp2.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + qp2.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); q = qp2.parse("the^3", "field"); // "the" is a stop word so the result is an empty query: @@ -979,7 +979,7 @@ public class TestQPHelper extends LuceneTestCase { public void testCustomQueryParserWildcard() { try { - new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("a?t", "contents"); + new QPTestParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("a?t", "contents"); fail("Wildcard queries should not be allowed"); } catch (QueryNodeException expected) { // expected exception @@ -988,7 +988,7 @@ public class TestQPHelper extends LuceneTestCase { public void testCustomQueryParserFuzzy() throws Exception { try { - new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents"); + new QPTestParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents"); fail("Fuzzy queries should not be allowed"); } catch (QueryNodeException expected) { // expected exception @@ -999,7 +999,7 @@ public class TestQPHelper extends LuceneTestCase { BooleanQuery.setMaxClauseCount(2); try { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); qp.parse("one two three", "field"); fail("ParseException expected due to too many boolean clauses"); @@ -1013,7 +1013,7 @@ public class TestQPHelper extends LuceneTestCase { */ public void testPrecedence() throws Exception { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); Query query1 = qp.parse("A AND B OR C AND D", "field"); Query query2 = qp.parse("+A +B +C +D", "field"); @@ -1145,7 +1145,7 @@ public class TestQPHelper extends LuceneTestCase { public void testStopwords() throws Exception { StandardQueryParser qp = new StandardQueryParser(); CharacterRunAutomaton stopSet = new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton()); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true)); + qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet, true)); Query result = qp.parse("a:the OR a:foo", "a"); assertNotNull("result is null and it shouldn't be", result); @@ -1169,7 +1169,7 @@ public class TestQPHelper extends LuceneTestCase { public void testPositionIncrement() throws Exception { StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer( - new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); qp.setEnablePositionIncrements(true); @@ -1190,7 +1190,7 @@ public class TestQPHelper extends LuceneTestCase { public void testMatchAllDocs() throws Exception { StandardQueryParser qp = 
new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); assertEquals(new MatchAllDocsQuery(), qp.parse("*:*", "field")); assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)", "field")); @@ -1202,7 +1202,7 @@ public class TestQPHelper extends LuceneTestCase { private void assertHits(int expected, String query, IndexSearcher is) throws IOException, QueryNodeException { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); qp.setLocale(Locale.ENGLISH); Query q = qp.parse(query, "date"); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java index d0bb1dddc28..910c7e0f270 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java @@ -17,8 +17,6 @@ package org.apache.lucene.queryparser.surround.query; * limitations under the License. */ -import junit.framework.Assert; - import org.apache.lucene.queryparser.surround.parser.QueryParser; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java index e02e25e4269..a0e4639f87e 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java @@ -39,7 +39,12 @@ public class Test02Boolean extends LuceneTestCase { "a c e a b c" }; - SingleFieldTestDb db1 = new SingleFieldTestDb(random, docs1, fieldName); + public void setUp() throws Exception { + super.setUp(); + db1 = new SingleFieldTestDb(random(), docs1, fieldName); + } + + SingleFieldTestDb db1; public void normalTest1(String query, int[] expdnrs) throws Exception { BooleanQueryTst bqt = new BooleanQueryTst( query, expdnrs, db1, fieldName, this, diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java index 336acf93381..cf22ec1f70d 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java @@ -26,6 +26,7 @@ public class Test03Distance extends LuceneTestCase { public static void main(String args[]) { TestRunner.run(new TestSuite(Test03Distance.class)); } + boolean verbose = false; int maxBasicQueries = 16; @@ -58,8 +59,16 @@ public class Test03Distance extends LuceneTestCase { "a c e a b c" }; - SingleFieldTestDb db1 = new SingleFieldTestDb(random, docs1, fieldName); + SingleFieldTestDb db1; + @Override + public void setUp() throws Exception { + super.setUp(); + db1 = new SingleFieldTestDb(random(), docs1, fieldName); + db2 = new SingleFieldTestDb(random(), docs2, fieldName); + db3 = new SingleFieldTestDb(random(), docs3, fieldName); + } + private void distanceTst(String query, int[] 
expdnrs, SingleFieldTestDb db) throws Exception { BooleanQueryTst bqt = new BooleanQueryTst( query, expdnrs, db, fieldName, this, new BasicQueryFactory(maxBasicQueries)); @@ -179,7 +188,7 @@ public class Test03Distance extends LuceneTestCase { "" }; - SingleFieldTestDb db2 = new SingleFieldTestDb(random, docs2, fieldName); + SingleFieldTestDb db2; public void distanceTest2(String query, int[] expdnrs) throws Exception { distanceTst(query, expdnrs, db2); @@ -227,7 +236,7 @@ public class Test03Distance extends LuceneTestCase { "" }; - SingleFieldTestDb db3 = new SingleFieldTestDb(random, docs3, fieldName); + SingleFieldTestDb db3; public void distanceTest3(String query, int[] expdnrs) throws Exception { distanceTst(query, expdnrs, db3); diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java index d64bbef15ca..0164394bff2 100644 --- a/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java +++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java @@ -199,7 +199,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public Query getQueryDOA(String query, Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); qp.setDefaultOperator(QueryParserBase.AND_OPERATOR); return qp.parse(query); @@ -320,8 +320,8 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testSimple() throws Exception { assertQueryEquals("term term term", null, "term term term"); - assertQueryEquals("türm term term", new MockAnalyzer(random), "türm term term"); - assertQueryEquals("ümlaut", new MockAnalyzer(random), "ümlaut"); + assertQueryEquals("türm term term", new MockAnalyzer(random()), "türm term term"); + assertQueryEquals("ümlaut", new MockAnalyzer(random()), "ümlaut"); // FIXME: enhance MockAnalyzer to be able to support this // it must no longer extend CharTokenizer @@ -381,7 +381,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null, "+(title:dog title:cat) -author:\"bob dole\""); - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random())); // make sure OR is the default: assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator()); qp.setDefaultOperator(QueryParserBase.AND_OPERATOR); @@ -391,7 +391,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testPunct() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a&b", a, "a&b"); assertQueryEquals("a&&b", a, "a&&b"); assertQueryEquals(".NET", a, ".NET"); @@ -411,7 +411,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, true); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true); assertQueryEquals("3", a, "3"); assertQueryEquals("term 1.0 1 2", a, 
"term 1.0 1 2"); assertQueryEquals("term term1 term2", a, "term term1 term2"); @@ -539,7 +539,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod()); - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)); qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE); assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod()); @@ -610,7 +610,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { final String defaultField = "default"; final String monthField = "month"; final String hourField = "hour"; - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)); // set a field specific date resolution qp.setDateResolution(monthField, DateTools.Resolution.MONTH); @@ -643,7 +643,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testEscaped() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); /*assertQueryEquals("\\[brackets", a, "\\[brackets"); assertQueryEquals("\\[brackets", null, "brackets"); @@ -737,7 +737,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testQueryStringEscaping() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c"); assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c"); @@ -823,7 +823,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testBoost() throws Exception { CharacterRunAutomaton stopWords = new CharacterRunAutomaton(BasicAutomata.makeString("on")); - Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true); + Analyzer oneStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopWords, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", oneStopAnalyzer); Query q = qp.parse("on^1.0"); assertNotNull(q); @@ -836,7 +836,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { q = qp.parse("\"on\"^1.0"); assertNotNull(q); - QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); q = qp2.parse("the^3"); // "the" is a stop word so the result is an empty query: assertNotNull(q); @@ -865,7 +865,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testCustomQueryParserWildcard() { try { - new QPTestParser("contents", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("a?t"); + new QPTestParser("contents", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("a?t"); fail("Wildcard queries should 
not be allowed"); } catch (ParseException expected) { // expected exception @@ -874,7 +874,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testCustomQueryParserFuzzy() throws Exception { try { - new QPTestParser("contents", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("xunit~"); + new QPTestParser("contents", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("xunit~"); fail("Fuzzy queries should not be allowed"); } catch (ParseException expected) { // expected exception @@ -884,7 +884,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testBooleanQuery() throws Exception { BooleanQuery.setMaxClauseCount(2); try { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); qp.parse("one two three"); fail("ParseException expected due to too many boolean clauses"); } catch (ParseException expected) { @@ -896,7 +896,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { * This test differs from TestPrecedenceQueryParser */ public void testPrecedence() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); Query query1 = qp.parse("A AND B OR C AND D"); Query query2 = qp.parse("+A +B +C +D"); assertEquals(query1, query2); @@ -932,7 +932,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testStarParsing() throws Exception { final int[] type = new int[1]; - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)) { + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) { @Override protected Query getWildcardQuery(String field, String termStr) throws ParseException { // override error checking of superclass @@ -991,13 +991,13 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testEscapedWildcard() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r")); assertEquals(q, qp.parse("foo\\?ba?r")); } public void testRegexps() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); RegexpQuery q = new RegexpQuery(new Term("field", "[a-z][123]")); assertEquals(q, qp.parse("/[a-z][123]/")); qp.setLowercaseExpandedTerms(true); @@ -1025,7 +1025,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testStopwords() throws Exception { CharacterRunAutomaton stopSet = new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton()); - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true)); + 
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet, true)); Query result = qp.parse("a:the OR a:foo"); assertNotNull("result is null and it shouldn't be", result); assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery); @@ -1041,7 +1041,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testPositionIncrement() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); qp.setEnablePositionIncrements(true); String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\""; // 0 2 5 7 8 @@ -1058,7 +1058,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testMatchAllDocs() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); assertEquals(new MatchAllDocsQuery(), qp.parse("*:*")); assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)")); BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*"); @@ -1067,7 +1067,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); qp.setLocale(Locale.ENGLISH); Query q = qp.parse(query); ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs; @@ -1085,7 +1085,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { // "match" public void testPositionIncrements() throws Exception { Directory dir = newDirectory(); - Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); + Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a)); Document doc = new Document(); doc.add(newField("f", "the wizard of ozzy", TextField.TYPE_UNSTORED)); @@ -1248,13 +1248,13 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testDistanceAsEditsParsing() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random())); FuzzyQuery q = (FuzzyQuery) qp.parse("foobar~2"); assertEquals(2f, q.getMinSimilarity(), 0.0001f); } public void testPhraseQueryToString() throws ParseException { - Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); + Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer); qp.setEnablePositionIncrements(true); PhraseQuery q = (PhraseQuery)qp.parse("\"this hi this is a test is\""); @@ 
@@ -1263,7 +1263,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
   public void testParseWildcardAndPhraseQueries() throws ParseException {
     String field = "content";
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, field, new MockAnalyzer(random));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, field, new MockAnalyzer(random()));
     qp.setAllowLeadingWildcard(true);

     String prefixQueries[][] = {
@@ -1302,7 +1302,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
         new CharacterRunAutomaton(new RegExp("[sS][tT][oO][pP]").toAutomaton());

     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field",
-        new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopStopList, false));
+        new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList, false));

     PhraseQuery phraseQuery = new PhraseQuery();
     phraseQuery.add(new Term("field", "1"));
@@ -1318,7 +1318,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
     assertEquals(phraseQuery, qp.parse("\"1 stop 2\""));

     qp = new QueryParser(TEST_VERSION_CURRENT, "field",
-        new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopStopList, true));
+        new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList, true));
     qp.setEnablePositionIncrements(true);

     phraseQuery = new PhraseQuery();
@@ -1329,7 +1329,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
   public void testMatchAllQueryParsing() throws Exception {
     // test simple parsing of MatchAllDocsQuery
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", new MockAnalyzer(random));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", new MockAnalyzer(random()));
     assertEquals(new MatchAllDocsQuery(), qp.parse(new MatchAllDocsQuery().toString()));

     // test parsing with non-default boost
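
All of the QueryParserTestBase hunks above make the same mechanical substitution: the static LuceneTestCase.random field becomes a call to the random() accessor, which returns the per-test Random tied to the reproducible master seed. A minimal sketch of the resulting idiom; the class name and query string are illustrative, not part of the patch, and the imports assume the 4.x module layout:

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.analysis.MockTokenizer;
    import org.apache.lucene.queryparser.classic.QueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.LuceneTestCase;

    public class RandomizedParserSketch extends LuceneTestCase {
      public void testParses() throws Exception {
        // random() is looked up per use, on the test thread; a static
        // field would capture one generator for the whole JVM instead.
        QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field",
            new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
        Query q = qp.parse("some query");
        assertNotNull(q);
      }
    }
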
diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java
index 1d8f9ebb569..abbb4045e4d 100644
--- a/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java
+++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java
@@ -54,7 +54,7 @@ public class TestParser extends LuceneTestCase {
   @BeforeClass
   public static void beforeClass() throws Exception {
     // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT):
-    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false);
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false);
     //initialize the parser
     builder = new CorePlusExtensionsParser("contents", analyzer);
diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
index cd9c12cce11..868880aecc4 100644
--- a/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
+++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
@@ -45,7 +45,7 @@ import java.util.StringTokenizer;
 public class TestQueryTemplateManager extends LuceneTestCase {

   private CoreParser builder;
-  private final Analyzer analyzer = new MockAnalyzer(random);
+  private Analyzer analyzer;
   private IndexSearcher searcher;
   private IndexReader reader;
   private Directory dir;
@@ -141,6 +141,7 @@ public class TestQueryTemplateManager extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
+    analyzer = new MockAnalyzer(random());
     //Create an index
     dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
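
TestQueryTemplateManager shows the companion pattern: a field that used to be initialized at declaration from the static random can no longer be, since random() only has a valid context once the test is actually running. The initializer therefore moves into setUp(), after super.setUp(). A sketch of the shape (field name illustrative):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.util.LuceneTestCase;

    public class SetUpInitSketch extends LuceneTestCase {
      private Analyzer analyzer; // no initializer: random() is not usable here

      @Override
      public void setUp() throws Exception {
        super.setUp(); // establishes this test's randomness context first
        analyzer = new MockAnalyzer(random());
      }
    }
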
diff --git a/modules/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/modules/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
index bebbbf764f2..3bc278c92ff 100644
--- a/modules/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
+++ b/modules/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
@@ -32,8 +32,7 @@ import org.junit.After;
 import org.junit.Before;

 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.*;

 public abstract class SpatialTestCase extends LuceneTestCase {
@@ -46,6 +45,7 @@ public abstract class SpatialTestCase extends LuceneTestCase {
   @Before
   public void setUp() throws Exception {
     super.setUp();
+    Random random = random();
     directory = newDirectory();
diff --git a/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java b/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java
index 964d47874d5..a0c5a26352c 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java
@@ -33,8 +33,8 @@ public class TestDirectSpellChecker extends LuceneTestCase {
   public void testInternalLevenshteinDistance() throws Exception {
     DirectSpellChecker spellchecker = new DirectSpellChecker();
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.KEYWORD, true));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+        new MockAnalyzer(random(), MockTokenizer.KEYWORD, true));

     String[] termsToAdd = { "metanoia", "metanoian", "metanoiai", "metanoias", "metanoi𐑍" };
     for (int i = 0; i < termsToAdd.length; i++) {
@@ -63,8 +63,8 @@ public class TestDirectSpellChecker extends LuceneTestCase {
     DirectSpellChecker spellChecker = new DirectSpellChecker();
     spellChecker.setMinQueryLength(0);
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+        new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));

     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
@@ -128,8 +128,8 @@ public class TestDirectSpellChecker extends LuceneTestCase {
   public void testOptions() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+        new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));

     Document doc = new Document();
     doc.add(newField("text", "foobar", TextField.TYPE_UNSTORED));
@@ -195,8 +195,8 @@ public class TestDirectSpellChecker extends LuceneTestCase {
   public void testBogusField() throws Exception {
     DirectSpellChecker spellChecker = new DirectSpellChecker();
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+        new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));

     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
@@ -219,8 +219,8 @@ public class TestDirectSpellChecker extends LuceneTestCase {
   public void testTransposition() throws Exception {
     DirectSpellChecker spellChecker = new DirectSpellChecker();
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+        new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));

     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
@@ -244,8 +244,8 @@ public class TestDirectSpellChecker extends LuceneTestCase {
   public void testTransposition2() throws Exception {
     DirectSpellChecker spellChecker = new DirectSpellChecker();
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+        new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));

     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
diff --git a/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java b/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
index 4373ba3f4cd..40ac84af7ea 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
@@ -49,7 +49,7 @@ public class TestLuceneDictionary extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     store = newDirectory();
-    IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));

     Document doc;
diff --git a/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java b/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
index 2c3aed9d12e..9dd9e012139 100755
--- a/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
@@ -54,7 +54,7 @@ public class TestSpellChecker extends LuceneTestCase {
     //create a user index
     userindex = newDirectory();
     IndexWriter writer = new IndexWriter(userindex, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(random())));

     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();
@@ -412,7 +412,7 @@ public class TestSpellChecker extends LuceneTestCase {
     assertEquals(4, searchers.size());
     int num_field2 = this.numdoc();
     assertEquals(num_field2, num_field1 + 1);
-    int numThreads = 5 + LuceneTestCase.random.nextInt(5);
+    int numThreads = 5 + random().nextInt(5);
     ExecutorService executor = Executors.newFixedThreadPool(numThreads);
     SpellCheckWorker[] workers = new SpellCheckWorker[numThreads];
     for (int i = 0; i < numThreads; i++) {
@@ -421,7 +421,7 @@ public class TestSpellChecker extends LuceneTestCase {
       workers[i] = spellCheckWorker;
     }
-    int iterations = 5 + random.nextInt(5);
+    int iterations = 5 + random().nextInt(5);
     for (int i = 0; i < iterations; i++) {
       Thread.sleep(100);
       // concurrently reset the spell index
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java
index 34bf6b1ed7a..74fb1d7473a 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java
@@ -17,6 +17,7 @@ package org.apache.lucene.search.suggest;

 import java.io.File;
+import java.util.Random;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.util.List;
@@ -78,6 +79,7 @@ public class PersistenceTest extends LuceneTestCase {
     lookup.load(new FileInputStream(new File(storeDir, "lookup.dat")));

     // Assert validity.
+    Random random = random();
     long previous = Long.MIN_VALUE;
     for (TermFreq k : keys) {
       List list = lookup.lookup(_TestUtil.bytesToCharSequence(k.term, random), false, 1);
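
PersistenceTest (and SpatialTestCase earlier) take the other easy route: a local `Random random = random();` at the top of the method, so the body that previously referenced the field compiles unchanged while still drawing from the per-test generator. A self-contained sketch of the hoisting pattern (loop bounds illustrative):

    import java.util.Random;
    import org.apache.lucene.util.LuceneTestCase;

    public class LocalRandomSketch extends LuceneTestCase {
      public void testHoistedLocal() {
        Random random = random(); // one lookup, reused below
        for (int i = 0; i < 100; i++) {
          int len = random.nextInt(256); // same stream as calling random() inline
          assertTrue(len >= 0 && len < 256);
        }
      }
    }
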
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefList.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefList.java
index ca997fabc28..2b323b0823c 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefList.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefList.java
@@ -18,9 +18,7 @@ package org.apache.lucene.search.suggest;
  */

 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
+import java.util.*;

 import org.apache.lucene.search.suggest.BytesRefList;
 import org.apache.lucene.util.BytesRef;
@@ -29,8 +27,9 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;

 public class TestBytesRefList extends LuceneTestCase {
-  
+
   public void testAppend() throws IOException {
+    Random random = random();
     BytesRefList list = new BytesRefList();
     List stringList = new ArrayList();
     for (int j = 0; j < 2; j++) {
@@ -69,8 +68,9 @@ public class TestBytesRefList extends LuceneTestCase {
       }
     }
   }
-  
+
   public void testSort() throws IOException {
+    Random random = random();
     BytesRefList list = new BytesRefList();
     List stringList = new ArrayList();
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/TestHighFrequencyDictionary.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/TestHighFrequencyDictionary.java
index 71e479c33ee..e598f60e819 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/TestHighFrequencyDictionary.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/TestHighFrequencyDictionary.java
@@ -30,7 +30,7 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestHighFrequencyDictionary extends LuceneTestCase {
   public void testEmpty() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
     writer.commit();
     writer.close();
     IndexReader ir = DirectoryReader.open(dir);
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/TestTermFreqIterator.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/TestTermFreqIterator.java
index 5638894b83d..bbfb8494d8d 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/TestTermFreqIterator.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/TestTermFreqIterator.java
@@ -20,6 +20,7 @@ package org.apache.lucene.search.suggest;
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Random;
 import java.util.TreeMap;

 import org.apache.lucene.search.spell.TermFreqIterator;
@@ -40,6 +41,7 @@ public class TestTermFreqIterator extends LuceneTestCase {
   }

   public void testTerms() throws Exception {
+    Random random = random();
     int num = atLeast(10000);
     Comparator comparator = random.nextBoolean() ? BytesRef.getUTF8SortedAsUnicodeComparator() : BytesRef.getUTF8SortedAsUTF16Comparator();
@@ -88,6 +90,7 @@ public class TestTermFreqIterator extends LuceneTestCase {
     byte[] buffer = new byte[0];
     ByteArrayDataOutput output = new ByteArrayDataOutput(buffer);

+    final Random random = new Random(random().nextLong());
     for (int i = 0; i < num; i++) {
       BytesRef spare;
       long weight;
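
TestTermFreqIterator needs a Random it can hold on to, so it derives one: `new Random(random().nextLong())`. The seed comes from the per-test stream, so runs stay reproducible, but the derived instance is independent of the test-thread generator. A sketch (buffer size illustrative):

    import java.util.Random;
    import org.apache.lucene.util.LuceneTestCase;

    public class DerivedRandomSketch extends LuceneTestCase {
      public void testDerivedGenerator() {
        // Reproducible (seed from the master stream) yet detached.
        final Random random = new Random(random().nextLong());
        byte[] buffer = new byte[random.nextInt(1024)];
        random.nextBytes(buffer);
        assertTrue(buffer.length < 1024);
      }
    }
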
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java
index 5c06670a3b2..c08bb4fd961 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java
@@ -35,8 +35,8 @@ public class BytesRefSortersTest extends LuceneTestCase {
   private void check(BytesRefSorter sorter) throws Exception {
     for (int i = 0; i < 100; i++) {
-      byte [] current = new byte [random.nextInt(256)];
-      random.nextBytes(current);
+      byte [] current = new byte [random().nextInt(256)];
+      random().nextBytes(current);
       sorter.add(new BytesRef(current));
     }
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java
index 339282e642b..e2c5c87c5ae 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java
@@ -71,19 +71,19 @@ public class FSTCompletionTest extends LuceneTestCase {
   }

   public void testExactMatchHighPriority() throws Exception {
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("two", random), 1),
+    assertMatchEquals(completion.lookup(stringToCharSequence("two"), 1),
         "two/1.0");
   }

   public void testExactMatchLowPriority() throws Exception {
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("one", random), 2),
+    assertMatchEquals(completion.lookup(stringToCharSequence("one"), 2),
         "one/0.0",
         "oneness/1.0");
   }

   public void testExactMatchReordering() throws Exception {
     // Check reordering of exact matches.
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("four", random), 4),
+    assertMatchEquals(completion.lookup(stringToCharSequence("four"), 4),
         "four/0.0",
         "fourblah/1.0",
         "fourteen/1.0",
@@ -92,49 +92,49 @@ public class FSTCompletionTest extends LuceneTestCase {
   public void testRequestedCount() throws Exception {
     // 'one' is promoted after collecting two higher ranking results.
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("one", random), 2),
+    assertMatchEquals(completion.lookup(stringToCharSequence("one"), 2),
         "one/0.0",
         "oneness/1.0");

     // 'four' is collected in a bucket and then again as an exact match.
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("four", random), 2),
+    assertMatchEquals(completion.lookup(stringToCharSequence("four"), 2),
         "four/0.0",
         "fourblah/1.0");

     // Check reordering of exact matches.
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("four", random), 4),
+    assertMatchEquals(completion.lookup(stringToCharSequence("four"), 4),
         "four/0.0",
         "fourblah/1.0",
         "fourteen/1.0",
         "fourier/0.0");

     // 'one' is at the top after collecting all alphabetical results.
-    assertMatchEquals(completionAlphabetical.lookup(_TestUtil.stringToCharSequence("one", random), 2),
+    assertMatchEquals(completionAlphabetical.lookup(stringToCharSequence("one"), 2),
         "one/0.0",
         "oneness/1.0");

     // 'one' is not promoted after collecting two higher ranking results.
     FSTCompletion noPromotion = new FSTCompletion(completion.getFST(), true, false);
-    assertMatchEquals(noPromotion.lookup(_TestUtil.stringToCharSequence("one", random), 2),
+    assertMatchEquals(noPromotion.lookup(stringToCharSequence("one"), 2),
         "oneness/1.0",
         "onerous/1.0");

     // 'one' is at the top after collecting all alphabetical results.
-    assertMatchEquals(completionAlphabetical.lookup(_TestUtil.stringToCharSequence("one", random), 2),
+    assertMatchEquals(completionAlphabetical.lookup(stringToCharSequence("one"), 2),
         "one/0.0",
         "oneness/1.0");
   }

   public void testMiss() throws Exception {
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("xyz", random), 1));
+    assertMatchEquals(completion.lookup(stringToCharSequence("xyz"), 1));
   }

   public void testAlphabeticWithWeights() throws Exception {
-    assertEquals(0, completionAlphabetical.lookup(_TestUtil.stringToCharSequence("xyz", random), 1).size());
+    assertEquals(0, completionAlphabetical.lookup(stringToCharSequence("xyz"), 1).size());
   }

   public void testFullMatchList() throws Exception {
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("one", random), Integer.MAX_VALUE),
+    assertMatchEquals(completion.lookup(stringToCharSequence("one"), Integer.MAX_VALUE),
         "oneness/1.0",
         "onerous/1.0",
         "onesimus/1.0",
@@ -148,14 +148,14 @@ public class FSTCompletionTest extends LuceneTestCase {
     builder.add(new BytesRef(key), 0);

     FSTCompletion lookup = builder.build();
-    List result = lookup.lookup(_TestUtil.stringToCharSequence(key, random), 1);
+    List result = lookup.lookup(stringToCharSequence(key), 1);
     assertEquals(1, result.size());
   }

   public void testLargeInputConstantWeights() throws Exception {
     FSTCompletionLookup lookup = new FSTCompletionLookup(10, true);
-    Random r = random;
+    Random r = random();
     List keys = new ArrayList();
     for (int i = 0; i < 5000; i++) {
       keys.add(new TermFreq(_TestUtil.randomSimpleString(r), -1));
@@ -167,7 +167,7 @@ public class FSTCompletionTest extends LuceneTestCase {
     // are.
     Long previous = null;
     for (TermFreq tf : keys) {
-      Long current = ((Number)lookup.get(_TestUtil.bytesToCharSequence(tf.term, random))).longValue();
+      Long current = ((Number)lookup.get(_TestUtil.bytesToCharSequence(tf.term, random()))).longValue();
       if (previous != null) {
         assertEquals(previous, current);
       }
@@ -181,11 +181,11 @@ public class FSTCompletionTest extends LuceneTestCase {
     FSTCompletionLookup lookup = new FSTCompletionLookup();
     lookup.build(new TermFreqArrayIterator(input));
     for (TermFreq tf : input) {
-      assertNotNull("Not found: " + tf.term.toString(), lookup.get(_TestUtil.bytesToCharSequence(tf.term, random)));
-      assertEquals(tf.term.utf8ToString(), lookup.lookup(_TestUtil.bytesToCharSequence(tf.term, random), true, 1).get(0).key.toString());
+      assertNotNull("Not found: " + tf.term.toString(), lookup.get(_TestUtil.bytesToCharSequence(tf.term, random())));
+      assertEquals(tf.term.utf8ToString(), lookup.lookup(_TestUtil.bytesToCharSequence(tf.term, random()), true, 1).get(0).key.toString());
     }

-    List result = lookup.lookup(_TestUtil.stringToCharSequence("wit", random), true, 5);
+    List result = lookup.lookup(stringToCharSequence("wit"), true, 5);
     assertEquals(5, result.size());
     assertTrue(result.get(0).key.toString().equals("wit"));  // exact match.
     assertTrue(result.get(1).key.toString().equals("with"));  // highest count.
@@ -193,14 +193,14 @@ public class FSTCompletionTest extends LuceneTestCase {
   public void testEmptyInput() throws Exception {
     completion = new FSTCompletionBuilder().build();
-    assertMatchEquals(completion.lookup(_TestUtil.stringToCharSequence("", random), 10));
+    assertMatchEquals(completion.lookup(stringToCharSequence(""), 10));
   }

   public void testRandom() throws Exception {
     List freqs = new ArrayList();
-    Random rnd = random;
+    Random rnd = random();
     for (int i = 0; i < 2500 + rnd.nextInt(2500); i++) {
-      int weight = random.nextInt(100);
+      int weight = rnd.nextInt(100);
       freqs.add(new TermFreq("" + rnd.nextLong(), weight));
     }
@@ -211,13 +211,17 @@ public class FSTCompletionTest extends LuceneTestCase {
       final String term = tf.term.utf8ToString();
       for (int i = 1; i < term.length(); i++) {
         String prefix = term.substring(0, i);
-        for (LookupResult lr : lookup.lookup(_TestUtil.stringToCharSequence(prefix, random), true, 10)) {
+        for (LookupResult lr : lookup.lookup(stringToCharSequence(prefix), true, 10)) {
           assertTrue(lr.key.toString().startsWith(prefix));
         }
       }
     }
   }

+  private CharSequence stringToCharSequence(String prefix) {
+    return _TestUtil.stringToCharSequence(prefix, random());
+  }
+
   private void assertMatchEquals(List res, String... expected) {
     String [] result = new String [res.size()];
     for (int i = 0; i < res.size(); i++) {
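
Rather than thread a Random through every call, FSTCompletionTest wraps the repeated conversion in a private helper so each call site picks up random() implicitly. A sketch built around that helper (the surrounding test class is illustrative; the helper body is the one the patch adds):

    import org.apache.lucene.util.LuceneTestCase;
    import org.apache.lucene.util._TestUtil;

    public class HelperSketch extends LuceneTestCase {
      // As added to FSTCompletionTest: hides the Random plumbing.
      private CharSequence stringToCharSequence(String prefix) {
        return _TestUtil.stringToCharSequence(prefix, random());
      }

      public void testContentPreserved() {
        // Assuming the utility only varies the CharSequence implementation,
        // not the characters themselves.
        assertEquals("four", stringToCharSequence("four").toString());
      }
    }
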
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java
index 3a7937c8ac9..8d68aebf2e2 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java
@@ -85,8 +85,8 @@ public class TestSort extends LuceneTestCase {
   private byte[][] generateRandom(int howMuchData) {
     ArrayList data = new ArrayList();
     while (howMuchData > 0) {
-      byte [] current = new byte [random.nextInt(256)];
-      random.nextBytes(current);
+      byte [] current = new byte [random().nextInt(256)];
+      random().nextBytes(current);
       data.add(current);
       howMuchData -= current.length;
     }
diff --git a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java
index 6cadef3c379..4b3fe73ac4c 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java
@@ -17,13 +17,7 @@ package org.apache.lucene.search.suggest.fst;
  * limitations under the License.
  */

-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.TreeSet;
+import java.util.*;

 import org.apache.lucene.search.suggest.Lookup.LookupResult;
 import org.apache.lucene.search.suggest.TermFreq;
@@ -41,6 +35,7 @@ public class WFSTCompletionTest extends LuceneTestCase {
       new TermFreq("barbara", 6)
     };

+    Random random = new Random(random().nextLong());
     WFSTCompletionLookup suggester = new WFSTCompletionLookup();
     suggester.build(new TermFreqArrayIterator(keys));
@@ -88,7 +83,7 @@ public class WFSTCompletionTest extends LuceneTestCase {
       while (true) {
         // TODO: would be nice to fix this slowCompletor/comparator to
        // use full range, but we might lose some coverage too...
-        s = _TestUtil.randomSimpleString(random);
+        s = _TestUtil.randomSimpleString(random());
        if (!slowCompletor.containsKey(s)) {
          break;
        }
@@ -98,16 +93,16 @@ public class WFSTCompletionTest extends LuceneTestCase {
         allPrefixes.add(s.substring(0, j));
       }
       // we can probably do Integer.MAX_VALUE here, but why worry.
-      int weight = random.nextInt(1<<24);
+      int weight = random().nextInt(1<<24);
       slowCompletor.put(s, (long)weight);
       keys[i] = new TermFreq(s, weight);
     }

     WFSTCompletionLookup suggester = new WFSTCompletionLookup(false);
     suggester.build(new TermFreqArrayIterator(keys));
-    
+
+    Random random = new Random(random().nextLong());
     for (String prefix : allPrefixes) {
-      final int topN = _TestUtil.nextInt(random, 1, 10);
       List r = suggester.lookup(_TestUtil.stringToCharSequence(prefix, random), false, topN);
diff --git a/solr/contrib/contrib-build.xml b/solr/contrib/contrib-build.xml
index 86c2d1855bc..fc5888b8a4b 100644
--- a/solr/contrib/contrib-build.xml
+++ b/solr/contrib/contrib-build.xml
@@ -25,9 +25,6 @@
-
-
-
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java
index 6cfa2fe3b49..ce71937a3e1 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java
@@ -45,7 +45,7 @@ public class TestEphemeralCache extends AbstractDataImportHandlerTestCase {
   public void test() throws Exception {
     assertFullImport(getDataConfigDotXml());
   }
-  
+
   @SuppressWarnings("unchecked")
   private void setupMockData() {
     List parentRows = new ArrayList();
diff --git a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
index b7318c8e3b1..dcef030266c 100755
--- a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
@@ -221,20 +221,20 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
     stress=0;  // turn off stress... we want to tex max combos in min time
     for (int i=0; i<25*RANDOM_MULTIPLIER; i++) {
-      String f = fieldNames[random.nextInt(fieldNames.length)];
-      if (random.nextBoolean()) f = t1;  // the text field is a really interesting one to facet on (and it's multi-valued too)
+      String f = fieldNames[random().nextInt(fieldNames.length)];
+      if (random().nextBoolean()) f = t1;  // the text field is a really interesting one to facet on (and it's multi-valued too)

       // we want a random query and not just *:* so we'll get zero counts in facets also
       // TODO: do a better random query
-      String q = random.nextBoolean() ? "*:*" : "id:(1 3 5 7 9 11 13) OR id:[100 TO " + random.nextInt(50) + "]";
+      String q = random().nextBoolean() ? "*:*" : "id:(1 3 5 7 9 11 13) OR id:[100 TO " + random().nextInt(50) + "]";
-      int nolimit = random.nextBoolean() ? -1 : 10000;  // these should be equivalent
+      int nolimit = random().nextBoolean() ? -1 : 10000;  // these should be equivalent

       // if limit==-1, we should always get exact matches
-      query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","count", "facet.mincount",random.nextInt(5), "facet.offset",random.nextInt(10));
-      query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","index", "facet.mincount",random.nextInt(5), "facet.offset",random.nextInt(10));
+      query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","count", "facet.mincount",random().nextInt(5), "facet.offset",random().nextInt(10));
+      query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","index", "facet.mincount",random().nextInt(5), "facet.offset",random().nextInt(10));

       // for index sort, we should get exact results for mincount <= 1
-      query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.sort","index", "facet.mincount",random.nextInt(2), "facet.offset",random.nextInt(10), "facet.limit",random.nextInt(11)-1);
+      query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.sort","index", "facet.mincount",random().nextInt(2), "facet.offset",random().nextInt(10), "facet.limit",random().nextInt(11)-1);
     }

     stress = backupStress;  // restore stress
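
TestDistributedSearch rolls fresh facet parameters on every loop iteration; with the accessor, each draw still comes from the single reproducible per-test stream, so a failing combination can be replayed from the seed. A compact sketch of that style (the parameter names mirror the query above):

    import java.util.Random;
    import org.apache.lucene.util.LuceneTestCase;

    public class RandomParamsSketch extends LuceneTestCase {
      public void testParamRolls() {
        Random r = random();
        String q = r.nextBoolean() ? "*:*" : "id:[100 TO " + r.nextInt(50) + "]";
        int mincount = r.nextInt(5); // facet.mincount
        int offset = r.nextInt(10);  // facet.offset
        assertTrue(mincount < 5 && offset < 10 && q.length() > 0);
      }
    }
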
diff --git a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
index b6e32c7c641..c0ce1e2bc44 100644
--- a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
@@ -555,7 +555,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 {

     while (--indexIter >= 0) {

-      int indexSize = random.nextInt(25 * RANDOM_MULTIPLIER);
+      int indexSize = random().nextInt(25 * RANDOM_MULTIPLIER);
       //indexSize=2;
       List types = new ArrayList();
       types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4)));
@@ -611,17 +611,17 @@ public class TestGroupingSearch extends SolrTestCaseJ4 {

       for (int qiter=0; qiter sortComparator = createSort(h.getCore().getSchema(), types, stringSortA);
         String sortStr = stringSortA[0];
-        Comparator groupComparator = random.nextBoolean() ? sortComparator : createSort(h.getCore().getSchema(), types, stringSortA);
+        Comparator groupComparator = random().nextBoolean() ? sortComparator : createSort(h.getCore().getSchema(), types, stringSortA);
         String groupSortStr = stringSortA[0];

         // since groupSortStr defaults to sortStr, we need to normalize null to "score desc" if
@@ -657,10 +657,10 @@ public class TestGroupingSearch extends SolrTestCaseJ4 {
         List sortedGroups = new ArrayList(groups.values());
         Collections.sort(sortedGroups, groupComparator==sortComparator ? createFirstDocComparator(sortComparator) : createMaxDocComparator(sortComparator));

-        boolean includeNGroups = random.nextBoolean();
+        boolean includeNGroups = random().nextBoolean();
         Object modelResponse = buildGroupedResult(h.getCore().getSchema(), sortedGroups, start, rows, group_offset, group_limit, includeNGroups);

-        boolean truncateGroups = random.nextBoolean();
+        boolean truncateGroups = random().nextBoolean();
         Map facetCounts = new TreeMap();
         if (truncateGroups) {
           for (Grp grp : sortedGroups) {
@@ -694,7 +694,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 {
           expectedFacetResponse.add(stringIntegerEntry.getValue());
         }

-        int randomPercentage = random.nextInt(101);
+        int randomPercentage = random().nextInt(101);
         // TODO: create a random filter too

         SolrQueryRequest req = req("group","true","wt","json","indent","true", "echoParams","all", "q","{!func}score_f", "group.field",groupField
             ,sortStr==null ? "nosort":"sort", sortStr ==null ? "": sortStr
diff --git a/solr/core/src/test/org/apache/solr/TestJoin.java b/solr/core/src/test/org/apache/solr/TestJoin.java
index ebb65603b7a..60b6e8f7c7d 100644
--- a/solr/core/src/test/org/apache/solr/TestJoin.java
+++ b/solr/core/src/test/org/apache/solr/TestJoin.java
@@ -151,7 +151,7 @@ public class TestJoin extends SolrTestCaseJ4 {

     while (--indexIter >= 0) {

-      int indexSize = random.nextInt(20 * RANDOM_MULTIPLIER);
+      int indexSize = random().nextInt(20 * RANDOM_MULTIPLIER);

       List types = new ArrayList();
       types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4)));
@@ -172,16 +172,16 @@ public class TestJoin extends SolrTestCaseJ4 {

       for (int qiter=0; qiter> pivot = pivots.get(fromField+"/"+toField);
@@ -210,7 +210,7 @@ public class TestJoin extends SolrTestCaseJ4 {

         SolrQueryRequest req = req("wt","json","indent","true", "echoParams","all",
             "q","{!join from="+fromField+" to="+toField
-            + (random.nextInt(4)==0 ? " fromIndex=collection1" : "")
+            + (random().nextInt(4)==0 ? " fromIndex=collection1" : "")
" fromIndex=collection1" : "") +"}*:*" ); diff --git a/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java b/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java index 9cca9d9d98b..a4a9a57da1a 100644 --- a/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java +++ b/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java @@ -273,12 +273,12 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { }; int numRounds = RANDOM_MULTIPLIER * 10000; - checkRandomData(random, analyzer, numRounds); + checkRandomData(random(), analyzer, numRounds); } public void testRandomBrokenHTML() throws Exception { int maxNumElements = 10000; - String text = _TestUtil.randomHtmlishString(random, maxNumElements); + String text = _TestUtil.randomHtmlishString(random(), maxNumElements); Reader reader = new LegacyHTMLStripCharFilter(CharReader.get(new StringReader(text))); while (reader.read() != -1); @@ -290,11 +290,11 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { int maxNumWords = 10000; int minWordLength = 3; int maxWordLength = 20; - int numWords = _TestUtil.nextInt(random, minNumWords, maxNumWords); - switch (_TestUtil.nextInt(random, 0, 4)) { + int numWords = _TestUtil.nextInt(random(), minNumWords, maxNumWords); + switch (_TestUtil.nextInt(random(), 0, 4)) { case 0: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomUnicodeString(random, maxWordLength)); + text.append(_TestUtil.randomUnicodeString(random(), maxWordLength)); text.append(' '); } break; @@ -302,14 +302,14 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { case 1: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { text.append(_TestUtil.randomRealisticUnicodeString - (random, minWordLength, maxWordLength)); + (random(), minWordLength, maxWordLength)); text.append(' '); } break; } default: { // ASCII 50% of the time for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomSimpleString(random)); + text.append(_TestUtil.randomSimpleString(random())); text.append(' '); } } diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java index 6fce0523677..2f2f98256b8 100644 --- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java @@ -56,7 +56,6 @@ import org.apache.solr.util.DefaultSolrThreadFactory; /** * */ - public class BasicDistributedZkTest extends AbstractDistributedZkTestCase { private static final String DEFAULT_COLLECTION = "collection1"; diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java index a655439af30..3a65bed6b74 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java @@ -42,15 +42,12 @@ import org.junit.Ignore; @Ignore("ignore while investigating jenkins fails") public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest { - @BeforeClass public static void beforeSuperClass() throws Exception { - } @AfterClass public static void afterSuperClass() throws Exception { - } @Before @@ -237,7 +234,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest { while 
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 6fce0523677..2f2f98256b8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -56,7 +56,6 @@ import org.apache.solr.util.DefaultSolrThreadFactory;
 /**
  *
  */
-
 public class BasicDistributedZkTest extends AbstractDistributedZkTestCase {

   private static final String DEFAULT_COLLECTION = "collection1";
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
index a655439af30..3a65bed6b74 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
@@ -42,15 +42,12 @@ import org.junit.Ignore;
 @Ignore("ignore while investigating jenkins fails")
 public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
-
   @BeforeClass
   public static void beforeSuperClass() throws Exception {
-
   }

   @AfterClass
   public static void afterSuperClass() throws Exception {
-
   }

   @Before
@@ -237,7 +234,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
         while (true && !stop) {
           ++i;
-          if (doDeletes && random.nextBoolean() && deletes.size() > 0) {
+          if (doDeletes && random().nextBoolean() && deletes.size() > 0) {
             Integer delete = deletes.remove(0);
             try {
               numDeletes++;
@@ -271,7 +268,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
             fails.incrementAndGet();
           }

-          if (doDeletes && random.nextBoolean()) {
+          if (doDeletes && random().nextBoolean()) {
            deletes.add(i);
          }
diff --git a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
index 5478ce2b89c..f401c0de848 100644
--- a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
@@ -49,7 +49,6 @@ public class FullSolrCloudDistribCmdsTest extends FullSolrCloudTest {

   @BeforeClass
   public static void beforeSuperClass() throws Exception {
-
   }

   public FullSolrCloudDistribCmdsTest() {
diff --git a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java
index 19a4b1fc9ae..d2b100ff1f0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java
@@ -185,7 +185,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
       chaosMonkey = new ChaosMonkey(zkServer, zkStateReader,
           DEFAULT_COLLECTION, shardToJetty, shardToClient, shardToLeaderClient,
-          shardToLeaderJetty, random);
+          shardToLeaderJetty, random());
     }

     // wait until shards have started registering...
@@ -1270,7 +1270,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
         while (true && !stop) {
           ++i;
-          if (doDeletes && random.nextBoolean() && deletes.size() > 0) {
+          if (doDeletes && random().nextBoolean() && deletes.size() > 0) {
             Integer delete = deletes.remove(0);
             try {
               numDeletes++;
@@ -1293,7 +1293,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
             fails.incrementAndGet();
           }

-          if (doDeletes && random.nextBoolean()) {
+          if (doDeletes && random().nextBoolean()) {
             deletes.add(i);
           }
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
index 81a23530205..f33ec7d7769 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
@@ -324,7 +324,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
       public void run() {
         int count = atLeast(5);
         for (int i = 1; i < count; i++) {
-          int launchIn = random.nextInt(500);
+          int launchIn = random().nextInt(500);
           ClientThread thread = null;
           try {
             thread = new ClientThread(i);
@@ -348,7 +348,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
           int j;
           try {
             // always 1 we won't kill...
-            j = random.nextInt(threads.size() - 2);
+            j = random().nextInt(threads.size() - 2);
           } catch(IllegalArgumentException e) {
             continue;
           }
@@ -378,7 +378,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
           try {
             Thread.sleep(50);
             int j;
-            j = random.nextInt(threads.size());
+            j = random().nextInt(threads.size());
             try {
               threads.get(j).zkClient.getSolrZooKeeper().pauseCnxn(
                   ZkTestServer.TICK_TIME * 2);
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index 9a90172faae..11f0e5f35b1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -24,6 +24,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -259,9 +260,9 @@ public class OverseerTest extends SolrTestCaseJ4 {
     String zkDir = dataDir.getAbsolutePath() + File.separator
         + "zookeeper/server1/data";

-    final int nodeCount = random.nextInt(50)+50;   //how many simulated nodes (num of threads)
-    final int coreCount = random.nextInt(100)+100;  //how many cores to register
-    final int sliceCount = random.nextInt(20)+1;  //how many slices
+    final int nodeCount = random().nextInt(50)+50;   //how many simulated nodes (num of threads)
+    final int coreCount = random().nextInt(100)+100;  //how many cores to register
+    final int sliceCount = random().nextInt(20)+1;  //how many slices

     ZkTestServer server = new ZkTestServer(zkDir);
@@ -645,8 +646,9 @@ public class OverseerTest extends SolrTestCaseJ4 {
         } catch (Throwable t) {
           //t.printStackTrace();
         }
+        Random rnd = random();
         while (run) {
-          if(random.nextInt(20)==1){
+          if(rnd.nextInt(20)==1){
             try {
               overseerClient.close();
               overseerClient = electNewOverseer(zkAddress);
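
OverseerTest looks the generator up once (`Random rnd = random()`) before entering its polling loop. Inside long-running worker code a single up-front lookup keeps every iteration on the same instance and avoids repeated accessor calls from a non-test thread. Sketch (iteration count and event odds as in the test):

    import java.util.Random;
    import org.apache.lucene.util.LuceneTestCase;

    public class CachedRandomLoopSketch extends LuceneTestCase {
      public void testPollingLoop() {
        Random rnd = random(); // captured once, on the test thread
        int events = 0;
        for (int i = 0; i < 1000; i++) {
          if (rnd.nextInt(20) == 1) { // roughly a 1-in-20 event per pass
            events++;
          }
        }
        assertTrue(events <= 1000);
      }
    }
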
-    requestHandlerName = random.nextBoolean() ? "spellCheckCompRH" : "spellCheckCompRH_Direct";
+    requestHandlerName = random().nextBoolean() ? "spellCheckCompRH" : "spellCheckCompRH_Direct";
     super.setUp();
   }
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java
index efbc17cad22..192964554e3 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java
@@ -153,7 +153,7 @@ public class TermVectorComponentTest extends SolrTestCaseJ4 {
     StringBuilder expected = new StringBuilder("/termVectors/doc-0/test_posofftv/anoth=={");
     boolean first = true;
     for (int i = 0; i < options.length; i++) {
-      final boolean use = random.nextBoolean();
+      final boolean use = random().nextBoolean();
       if (use) {
         if (!first) {
           expected.append(", ");
diff --git a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java
index 6e7361fb9fe..4e8525c985b 100644
--- a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java
+++ b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java
@@ -42,7 +42,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
   static int random_dupe_percent = 25;       // some duplicates in the index to create deleted docs

   static void randomCommit(int percent_chance) {
-    if (random.nextInt(100) <= percent_chance)
+    if (random().nextInt(100) <= percent_chance)
       assertU(commit());
   }
@@ -52,7 +52,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
   static void add_doc(String... fieldsAndValues) {
     do {
       pendingDocs.add(fieldsAndValues);
-    } while (random.nextInt(100) <= random_dupe_percent);
+    } while (random().nextInt(100) <= random_dupe_percent);
     // assertU(adoc(fieldsAndValues));
     // randomCommit(random_commit_percent);
@@ -69,7 +69,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
     indexFacetPrefixSingleValued();
     indexSimpleGroupedFacetCounts();

-    Collections.shuffle(pendingDocs, random);
+    Collections.shuffle(pendingDocs, random());
     for (String[] doc : pendingDocs) {
       assertU(adoc(doc));
       randomCommit(random_commit_percent);
diff --git a/solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java b/solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java
index 9f7c8ccf52a..60128706e3a 100644
--- a/solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java
+++ b/solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java
@@ -152,13 +152,13 @@ public class SpatialFilterTest extends SolrTestCaseJ4 {
       int postFilterCount = DelegatingCollector.setLastDelegateCount;

       // throw in a random into the main query to prevent most cache hits
-      assertQ(req("fl", "id", "q","*:* OR foo_i:" + random.nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName +"}",
+      assertQ(req("fl", "id", "q","*:* OR foo_i:" + random().nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName +"}",
           "pt", pt, "d", String.valueOf(distance)), tests);
       assertEquals(postFilterCount, DelegatingCollector.setLastDelegateCount);  // post filtering shouldn't be used

       // try uncached
-      assertQ(req("fl", "id", "q","*:* OR foo_i:" + random.nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false" + "}",
+      assertQ(req("fl", "id", "q","*:* OR foo_i:" + random().nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false" + "}",
cache=false" + "}", "pt", pt, "d", String.valueOf(distance)), tests); assertEquals(postFilterCount, DelegatingCollector.setLastDelegateCount); // post filtering shouldn't be used @@ -166,7 +166,7 @@ public class SpatialFilterTest extends SolrTestCaseJ4 { // try post filtered for fields that support it if (fieldName.endsWith("ll")) { - assertQ(req("fl", "id", "q","*:* OR foo_i:" + random.nextInt(100)+100, "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false cost=150" + "}", + assertQ(req("fl", "id", "q","*:* OR foo_i:" + random().nextInt(100)+100, "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false cost=150" + "}", "pt", pt, "d", String.valueOf(distance)), tests); assertEquals(postFilterCount + 1, DelegatingCollector.setLastDelegateCount); // post filtering *should* have been used diff --git a/solr/core/src/test/org/apache/solr/search/TestDocSet.java b/solr/core/src/test/org/apache/solr/search/TestDocSet.java index 1e8f295aa55..c599c1cee70 100644 --- a/solr/core/src/test/org/apache/solr/search/TestDocSet.java +++ b/solr/core/src/test/org/apache/solr/search/TestDocSet.java @@ -42,9 +42,15 @@ import org.apache.lucene.util.OpenBitSetIterator; * */ public class TestDocSet extends LuceneTestCase { - Random rand = random; + Random rand; float loadfactor; + @Override + public void setUp() throws Exception { + super.setUp(); + rand = random(); + } + public OpenBitSet getRandomSet(int sz, int bitsToSet) { OpenBitSet bs = new OpenBitSet(sz); if (sz==0) return bs; diff --git a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java index 3dec3894d7f..03c99f3e689 100644 --- a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java +++ b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java @@ -256,13 +256,13 @@ public class TestFastLRUCache extends LuceneTestCase { // enough randomness to exercise all of the different cache purging phases public void testRandom() { - int sz = random.nextInt(100)+5; - int lowWaterMark = random.nextInt(sz-3)+1; - int keyrange = random.nextInt(sz*3)+1; + int sz = random().nextInt(100)+5; + int lowWaterMark = random().nextInt(sz-3)+1; + int keyrange = random().nextInt(sz*3)+1; ConcurrentLRUCache cache = new ConcurrentLRUCache(sz, lowWaterMark); for (int i=0; i<10000; i++) { - cache.put(random.nextInt(keyrange), ""); - cache.get(random.nextInt(keyrange)); + cache.put(random().nextInt(keyrange), ""); + cache.get(random().nextInt(keyrange)); } } @@ -272,7 +272,7 @@ public class TestFastLRUCache extends LuceneTestCase { int lowerWaterMark = cacheSize; int upperWaterMark = (int)(lowerWaterMark * 1.1); - Random r = random; + Random r = random(); ConcurrentLRUCache cache = new ConcurrentLRUCache(upperWaterMark, lowerWaterMark, (upperWaterMark+lowerWaterMark)/2, upperWaterMark, false, false, null); boolean getSize=false; int minSize=0,maxSize=0; @@ -323,7 +323,7 @@ public class TestFastLRUCache extends LuceneTestCase { void fillCache(SolrCache sc, int cacheSize, int maxKey) { for (int i=0; i n) { n = uu-ll+1; u = uu; @@ -187,8 +187,8 @@ public class TestFiltering extends SolrTestCaseJ4 { } } else { // negative frange.. 
diff --git a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java
index 3dec3894d7f..03c99f3e689 100644
--- a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java
+++ b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java
@@ -256,13 +256,13 @@ public class TestFastLRUCache extends LuceneTestCase {
   // enough randomness to exercise all of the different cache purging phases
   public void testRandom() {
-    int sz = random.nextInt(100)+5;
-    int lowWaterMark = random.nextInt(sz-3)+1;
-    int keyrange = random.nextInt(sz*3)+1;
+    int sz = random().nextInt(100)+5;
+    int lowWaterMark = random().nextInt(sz-3)+1;
+    int keyrange = random().nextInt(sz*3)+1;
     ConcurrentLRUCache cache = new ConcurrentLRUCache(sz, lowWaterMark);
     for (int i=0; i<10000; i++) {
-      cache.put(random.nextInt(keyrange), "");
-      cache.get(random.nextInt(keyrange));
+      cache.put(random().nextInt(keyrange), "");
+      cache.get(random().nextInt(keyrange));
     }
   }
@@ -272,7 +272,7 @@ public class TestFastLRUCache extends LuceneTestCase {
     int lowerWaterMark = cacheSize;
     int upperWaterMark = (int)(lowerWaterMark * 1.1);

-    Random r = random;
+    Random r = random();
     ConcurrentLRUCache cache = new ConcurrentLRUCache(upperWaterMark, lowerWaterMark, (upperWaterMark+lowerWaterMark)/2, upperWaterMark, false, false, null);
     boolean getSize=false;
     int minSize=0,maxSize=0;
@@ -323,7 +323,7 @@ public class TestFastLRUCache extends LuceneTestCase {
   void fillCache(SolrCache sc, int cacheSize, int maxKey) {
     for (int i=0; i n) {
           n = uu-ll+1;
           u = uu;
@@ -187,8 +187,8 @@ public class TestFiltering extends SolrTestCaseJ4 {
         }
       } else {
         // negative frange.. make it relatively small
-        l = random.nextInt(model.indexSize);
-        u = Math.max(model.indexSize-1, l+random.nextInt(Math.max(model.indexSize / 10, 2)));
+        l = random().nextInt(model.indexSize);
+        u = Math.max(model.indexSize-1, l+random().nextInt(Math.max(model.indexSize / 10, 2)));

         for (OpenBitSet set : sets) {
           set.clear(l,u+1);
@@ -200,7 +200,7 @@ public class TestFiltering extends SolrTestCaseJ4 {
       // term or boolean query
       OpenBitSet pset = new OpenBitSet(model.indexSize);
       for (int i=0; i params = new ArrayList();
       params.add("q");
       params.add(makeRandomQuery(model, true, false));

-      int nFilters = random.nextInt(5);
+      int nFilters = random().nextInt(5);
       for (int i=0; i fl = Arrays.asList
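
TestFastLRUCache derives its whole cache geometry from the per-test stream (`int sz = random().nextInt(100)+5;` plus a low-water mark strictly below it), so repeated runs wander through all the purging phases the comment mentions. A sketch of the bound arithmetic (bounds as in the test):

    import java.util.Random;
    import org.apache.lucene.util.LuceneTestCase;

    public class RandomSizingSketch extends LuceneTestCase {
      public void testBounds() {
        Random r = random();
        int sz = r.nextInt(100) + 5;              // 5..104
        int lowWaterMark = r.nextInt(sz - 3) + 1; // 1..sz-3, always < sz
        int keyrange = r.nextInt(sz * 3) + 1;     // 1..3*sz
        assertTrue(lowWaterMark < sz);
        assertTrue(keyrange >= 1 && keyrange <= 3 * sz);
      }
    }
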
diff --git a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
index 2604a35e4a9..a905b8e1fd1 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
@@ -300,19 +300,19 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {

     // req().getCore().getUpdateHandler().getIndexWriterProvider().getIndexWriter(req().getCore()).setInfoStream(System.out);

-    final int commitPercent = 5 + random.nextInt(20);
-    final int softCommitPercent = 30+random.nextInt(75); // what percent of the commits are soft
-    final int deletePercent = 4+random.nextInt(25);
-    final int deleteByQueryPercent = 1+random.nextInt(5);
-    final int ndocs = 5 + (random.nextBoolean() ? random.nextInt(25) : random.nextInt(200));
-    int nWriteThreads = 5 + random.nextInt(25);
+    final int commitPercent = 5 + random().nextInt(20);
+    final int softCommitPercent = 30+random().nextInt(75); // what percent of the commits are soft
+    final int deletePercent = 4+random().nextInt(25);
+    final int deleteByQueryPercent = 1+random().nextInt(5);
+    final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
+    int nWriteThreads = 5 + random().nextInt(25);

     final int maxConcurrentCommits = nWriteThreads;   // number of committers at a time... it should be <= maxWarmingSearchers

     // query variables
     final int percentRealtimeQuery = 60;
     final AtomicLong operations = new AtomicLong(50000);  // number of query operations to perform in total
-    int nReadThreads = 5 + random.nextInt(25);
+    int nReadThreads = 5 + random().nextInt(25);

     verbose("commitPercent=", commitPercent);
@@ -335,7 +335,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
     for (int i=0; i recoveryInfoF = uLog.applyBufferedUpdates();
     if (recoveryInfoF != null) {
@@ -1307,7 +1307,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
       while (recInfo == null) {
         try {
           // wait a short period of time for recovery to complete (and to give a chance for more writers to concurrently add docs)
-          recInfo = recoveryInfoF.get(random.nextInt(100/nWriteThreads), TimeUnit.MILLISECONDS);
+          recInfo = recoveryInfoF.get(random().nextInt(100/nWriteThreads), TimeUnit.MILLISECONDS);
         } catch (TimeoutException e) {
           // idle one more write thread
           verbose("Operation",operations.get(),"Draining permits for write thread",writeThreadNumber);
@@ -1315,7 +1315,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
           if (writeThreadNumber >= nWriteThreads) {
             // if we hit the end, back up and give a few write permits
             writeThreadNumber--;
-            writePermissions[writeThreadNumber].release(random.nextInt(2) + 1);
+            writePermissions[writeThreadNumber].release(random().nextInt(2) + 1);
           }

           // throttle readers so they don't steal too much CPU from the recovery thread
@@ -1370,19 +1370,19 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
   DirectoryReader reader;

   @Test
   public void testStressLuceneNRT() throws Exception {
-    final int commitPercent = 5 + random.nextInt(20);
-    final int softCommitPercent = 30+random.nextInt(75); // what percent of the commits are soft
-    final int deletePercent = 4+random.nextInt(25);
-    final int deleteByQueryPercent = 1+random.nextInt(5);
-    final int ndocs = 5 + (random.nextBoolean() ? random.nextInt(25) : random.nextInt(200));
-    int nWriteThreads = 5 + random.nextInt(25);
+    final int commitPercent = 5 + random().nextInt(20);
+    final int softCommitPercent = 30+random().nextInt(75); // what percent of the commits are soft
+    final int deletePercent = 4+random().nextInt(25);
+    final int deleteByQueryPercent = 1+random().nextInt(5);
+    final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
+    int nWriteThreads = 5 + random().nextInt(25);

     final int maxConcurrentCommits = nWriteThreads;   // number of committers at a time... it should be <= maxWarmingSearchers
     final AtomicLong operations = new AtomicLong(1000);  // number of query operations to perform in total - crank up if
-    int nReadThreads = 5 + random.nextInt(25);
-    final boolean tombstones = random.nextBoolean();
-    final boolean syncCommits = random.nextBoolean();
+    int nReadThreads = 5 + random().nextInt(25);
+    final boolean tombstones = random().nextBoolean();
+    final boolean syncCommits = random().nextBoolean();

     verbose("commitPercent=", commitPercent);
     verbose("softCommitPercent=",softCommitPercent);
@@ -1426,7 +1426,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {

     Directory dir = newDirectory();

-    final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
     writer.setDoRandomForceMergeAssert(false);

     // writer.commit();
@@ -1437,7 +1437,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
     for (int i=0; i= max) return min;
-    return min + random.nextFloat() * (max - min);
+    return min + random().nextFloat() * (max - min);
   }

   @Override
@@ -1136,19 +1139,19 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
       model.put(doc.id, doc);

       // commit 10% of the time
-      if (random.nextInt(commitOneOutOf)==0) {
+      if (random().nextInt(commitOneOutOf)==0) {
        assertU(commit());
      }

       // duplicate 10% of the docs
-      if (random.nextInt(10)==0) {
+      if (random().nextInt(10)==0) {
        updateJ(toJSON(doc), null);
        model.put(doc.id, doc);
      }
    }

     // optimize 10% of the time
-    if (random.nextInt(10)==0) {
+    if (random().nextInt(10)==0) {
       assertU(optimize());
     } else {
       assertU(commit());
@@ -1192,13 +1195,13 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
   public static Comparator createSort(IndexSchema schema, List fieldTypes, String[] out) {
     StringBuilder sortSpec = new StringBuilder();
-    int nSorts = random.nextInt(4);
+    int nSorts = random().nextInt(4);
     List> comparators = new ArrayList>();
     for (int i=0; i0) sortSpec.append(',');

-      int which = random.nextInt(fieldTypes.size()+2);
-      boolean asc = random.nextBoolean();
+      int which = random().nextInt(fieldTypes.size()+2);
+      boolean asc = random().nextBoolean();
       if (which == fieldTypes.size()) {
         // sort by score
         sortSpec.append("score").append(asc ? " asc" : " desc");