diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java index a833ea16d4e..9c6c7304cb7 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java @@ -31,7 +31,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { @@ -786,7 +786,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testRandomBrokenHTML() throws Exception { int maxNumElements = 10000; - String text = _TestUtil.randomHtmlishString(random(), maxNumElements); + String text = TestUtil.randomHtmlishString(random(), maxNumElements); checkAnalysisConsistency(random(), newTestAnalyzer(), random().nextBoolean(), text); } @@ -796,18 +796,18 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { int maxNumWords = 10000; int minWordLength = 3; int maxWordLength = 20; - int numWords = _TestUtil.nextInt(random(), minNumWords, maxNumWords); - switch (_TestUtil.nextInt(random(), 0, 4)) { + int numWords = TestUtil.nextInt(random(), minNumWords, maxNumWords); + switch (TestUtil.nextInt(random(), 0, 4)) { case 0: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomUnicodeString(random(), maxWordLength)); + text.append(TestUtil.randomUnicodeString(random(), maxWordLength)); text.append(' '); } break; } case 1: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomRealisticUnicodeString + 
text.append(TestUtil.randomRealisticUnicodeString (random(), minWordLength, maxWordLength)); text.append(' '); } @@ -815,7 +815,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase { } default: { // ASCII 50% of the time for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomSimpleString(random())); + text.append(TestUtil.randomSimpleString(random())); text.append(' '); } } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java index ea28c74d7d1..55975a30c12 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java @@ -33,8 +33,8 @@ import org.apache.lucene.analysis.CharFilter; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.UnicodeUtil; -import org.apache.lucene.util._TestUtil; public class TestMappingCharFilter extends BaseTokenStreamTestCase { @@ -274,9 +274,9 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { int num = random.nextInt(5); //System.out.println("NormalizeCharMap="); for (int i = 0; i < num; i++) { - String key = _TestUtil.randomSimpleString(random); + String key = TestUtil.randomSimpleString(random); if (!keys.contains(key) && key.length() != 0) { - String value = _TestUtil.randomSimpleString(random); + String value = TestUtil.randomSimpleString(random); builder.add(key, value); keys.add(key); //System.out.println("mapping: '" + key + "' => '" + value + "'"); @@ -294,7 +294,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { System.out.println("\nTEST iter=" + iter); } - final char 
endLetter = (char) _TestUtil.nextInt(random, 'b', 'z'); + final char endLetter = (char) TestUtil.nextInt(random, 'b', 'z'); final Map map = new HashMap(); final NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder(); @@ -303,9 +303,9 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { System.out.println(" mappings:"); } while (map.size() < numMappings) { - final String key = _TestUtil.randomSimpleStringRange(random, 'a', endLetter, 7); + final String key = TestUtil.randomSimpleStringRange(random, 'a', endLetter, 7); if (key.length() != 0 && !map.containsKey(key)) { - final String value = _TestUtil.randomSimpleString(random); + final String value = TestUtil.randomSimpleString(random); map.put(key, value); builder.add(key, value); if (VERBOSE) { @@ -321,7 +321,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { } for(int iter2=0;iter2<100;iter2++) { - final String content = _TestUtil.randomSimpleStringRange(random, 'a', endLetter, atLeast(1000)); + final String content = TestUtil.randomSimpleStringRange(random, 'a', endLetter, atLeast(1000)); if (VERBOSE) { System.out.println(" content=" + content); @@ -427,7 +427,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase { } actualBuilder.append((char) ch); } else { - final char[] buffer = new char[_TestUtil.nextInt(random, 1, 100)]; + final char[] buffer = new char[TestUtil.nextInt(random, 1, 100)]; final int off = buffer.length == 1 ? 
0 : random.nextInt(buffer.length-1); final int count = mapFilter.read(buffer, off, buffer.length-off); if (count == -1) { diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java index 0f1ebcf95a7..89667f7271d 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java @@ -30,7 +30,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.BasicOperations; import org.apache.lucene.util.automaton.CharacterRunAutomaton; @@ -76,7 +76,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } }; for (int i = 0; i < 1000; i++) { - String s = _TestUtil.randomSimpleString(random); + String s = TestUtil.randomSimpleString(random); assertEquals(s, left.tokenStream("foo", newStringReader(s)), right.tokenStream("foo", newStringReader(s))); } @@ -97,7 +97,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { }; int numIterations = atLeast(50); for (int i = 0; i < numIterations; i++) { - String s = _TestUtil.randomSimpleString(random, maxLength); + String s = TestUtil.randomSimpleString(random, maxLength); assertEquals(s, left.tokenStream("foo", newStringReader(s)), right.tokenStream("foo", newStringReader(s))); } @@ -114,7 +114,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } }; for (int i = 0; i < 1000; i++) { - String s = _TestUtil.randomHtmlishString(random, 20); + String s = 
TestUtil.randomHtmlishString(random, 20); assertEquals(s, left.tokenStream("foo", newStringReader(s)), right.tokenStream("foo", newStringReader(s))); } @@ -134,7 +134,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { }; int numIterations = atLeast(50); for (int i = 0; i < numIterations; i++) { - String s = _TestUtil.randomHtmlishString(random, maxLength); + String s = TestUtil.randomHtmlishString(random, maxLength); assertEquals(s, left.tokenStream("foo", newStringReader(s)), right.tokenStream("foo", newStringReader(s))); } @@ -151,7 +151,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { } }; for (int i = 0; i < 1000; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = TestUtil.randomUnicodeString(random); assertEquals(s, left.tokenStream("foo", newStringReader(s)), right.tokenStream("foo", newStringReader(s))); } @@ -171,7 +171,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase { }; int numIterations = atLeast(50); for (int i = 0; i < numIterations; i++) { - String s = _TestUtil.randomUnicodeString(random, maxLength); + String s = TestUtil.randomUnicodeString(random, maxLength); assertEquals(s, left.tokenStream("foo", newStringReader(s)), right.tokenStream("foo", newStringReader(s))); } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java index a5ce14fc13e..10d5f180775 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java @@ -37,7 +37,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestKeywordAnalyzer 
extends BaseTokenStreamTestCase { @@ -97,21 +97,21 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { writer.close(); IndexReader reader = DirectoryReader.open(dir); - DocsEnum td = _TestUtil.docs(random(), - reader, - "partnum", - new BytesRef("Q36"), - MultiFields.getLiveDocs(reader), - null, - 0); + DocsEnum td = TestUtil.docs(random(), + reader, + "partnum", + new BytesRef("Q36"), + MultiFields.getLiveDocs(reader), + null, + 0); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); - td = _TestUtil.docs(random(), - reader, - "partnum", - new BytesRef("Q37"), - MultiFields.getLiveDocs(reader), - null, - 0); + td = TestUtil.docs(random(), + reader, + "partnum", + new BytesRef("Q37"), + MultiFields.getLiveDocs(reader), + null, + 0); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java index 1278b261153..bca5e1ede50 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java @@ -85,8 +85,8 @@ import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.AttributeSource.AttributeFactory; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.Rethrow; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -305,7 +305,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { // TODO: could cause huge ram usage to use full int range for some filters // (e.g. 
allocate enormous arrays) // return Integer.valueOf(random.nextInt()); - return Integer.valueOf(_TestUtil.nextInt(random, -100, 100)); + return Integer.valueOf(TestUtil.nextInt(random, -100, 100)); } }); put(char.class, new ArgProducer() { @@ -372,7 +372,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { Collection col = new ArrayList(); int num = random.nextInt(5); for (int i = 0; i < num; i++) { - col.add(_TestUtil.randomSimpleString(random).toCharArray()); + col.add(TestUtil.randomSimpleString(random).toCharArray()); } return col; } @@ -383,7 +383,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, num, random.nextBoolean()); for (int i = 0; i < num; i++) { // TODO: make nastier - set.add(_TestUtil.randomSimpleString(random)); + set.add(TestUtil.randomSimpleString(random)); } return set; } @@ -451,7 +451,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { // a token type return StandardTokenizer.TOKEN_TYPES[random.nextInt(StandardTokenizer.TOKEN_TYPES.length)]; } else { - return _TestUtil.randomSimpleString(random); + return TestUtil.randomSimpleString(random); } } }); @@ -463,9 +463,9 @@ public class TestRandomChains extends BaseTokenStreamTestCase { int num = random.nextInt(5); //System.out.println("NormalizeCharMap="); for (int i = 0; i < num; i++) { - String key = _TestUtil.randomSimpleString(random); + String key = TestUtil.randomSimpleString(random); if (!keys.contains(key) && key.length() > 0) { - String value = _TestUtil.randomSimpleString(random); + String value = TestUtil.randomSimpleString(random); builder.add(key, value); keys.add(key); //System.out.println("mapping: '" + key + "' => '" + value + "'"); @@ -492,7 +492,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { CharArrayMap map = new CharArrayMap(TEST_VERSION_CURRENT, num, random.nextBoolean()); for (int i = 0; i < num; i++) { // TODO: make nastier - 
map.put(_TestUtil.randomSimpleString(random), _TestUtil.randomSimpleString(random)); + map.put(TestUtil.randomSimpleString(random), TestUtil.randomSimpleString(random)); } return map; } @@ -504,11 +504,11 @@ public class TestRandomChains extends BaseTokenStreamTestCase { for (int i = 0; i < num; i++) { String input = ""; do { - input = _TestUtil.randomRealisticUnicodeString(random); + input = TestUtil.randomRealisticUnicodeString(random); } while(input.isEmpty()); - String out = ""; _TestUtil.randomSimpleString(random); + String out = ""; TestUtil.randomSimpleString(random); do { - out = _TestUtil.randomRealisticUnicodeString(random); + out = TestUtil.randomRealisticUnicodeString(random); } while(out.isEmpty()); builder.add(input, out); } @@ -543,7 +543,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { private String randomNonEmptyString(Random random) { while(true) { - final String s = _TestUtil.randomUnicodeString(random).trim(); + final String s = TestUtil.randomUnicodeString(random).trim(); if (s.length() != 0 && s.indexOf('\u0000') == -1) { return s; } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java index 19604c296bb..dd273fa8dc5 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java @@ -28,7 +28,7 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter; import org.apache.lucene.analysis.util.CharArraySet; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -56,13 +56,13 @@ public class HunspellStemFilterTest extends 
BaseTokenStreamTestCase { public void testKeywordAttribute() throws IOException { MockTokenizer tokenizer = whitespaceMockTokenizer("lucene is awesome"); tokenizer.setEnableChecks(true); - HunspellStemFilter filter = new HunspellStemFilter(tokenizer, DICTIONARY, _TestUtil.nextInt(random(), 1, 3)); + HunspellStemFilter filter = new HunspellStemFilter(tokenizer, DICTIONARY, TestUtil.nextInt(random(), 1, 3)); assertTokenStreamContents(filter, new String[]{"lucene", "lucen", "is", "awesome"}, new int[] {1, 0, 1, 1}); // assert with keywork marker tokenizer = whitespaceMockTokenizer("lucene is awesome"); CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList("Lucene"), true); - filter = new HunspellStemFilter(new SetKeywordMarkerFilter(tokenizer, set), DICTIONARY, _TestUtil.nextInt(random(), 1, 3)); + filter = new HunspellStemFilter(new SetKeywordMarkerFilter(tokenizer, set), DICTIONARY, TestUtil.nextInt(random(), 1, 3)); assertTokenStreamContents(filter, new String[]{"lucene", "is", "awesome"}, new int[] {1, 1, 1}); } @@ -73,7 +73,7 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase { @Override protected TokenStreamComponents createComponents(String fieldName) { Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false); - return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY, _TestUtil.nextInt(random(), 1, 3))); + return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY, TestUtil.nextInt(random(), 1, 3))); } }; checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER); @@ -84,7 +84,7 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase { @Override protected TokenStreamComponents createComponents(String fieldName) { Tokenizer tokenizer = new KeywordTokenizer(); - return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY, _TestUtil.nextInt(random(), 1, 3))); + return new TokenStreamComponents(tokenizer, 
new HunspellStemFilter(tokenizer, DICTIONARY, TestUtil.nextInt(random(), 1, 3))); } }; checkOneTerm(a, "", ""); } } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCodepointCountFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCodepointCountFilter.java index 55323ff6b7d..8c58f5e6da3 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCodepointCountFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCodepointCountFilter.java @@ -18,16 +18,14 @@ package org.apache.lucene.analysis.miscellaneous; */ import java.io.IOException; -import java.io.Reader; import java.io.StringReader; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; -import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestCodepointCountFilter extends BaseTokenStreamTestCase { public void testFilterWithPosIncr() throws Exception { @@ -52,9 +50,9 @@ public class TestCodepointCountFilter extends BaseTokenStreamTestCase { public void testRandomStrings() throws IOException { for (int i = 0; i < 10000; i++) { - String text = _TestUtil.randomUnicodeString(random(), 100); - int min = _TestUtil.nextInt(random(), 0, 100); - int max = _TestUtil.nextInt(random(), 0, 100); + String text = TestUtil.randomUnicodeString(random(), 100); + int min = TestUtil.nextInt(random(), 0, 100); + int max = TestUtil.nextInt(random(), 0, 100); int count = text.codePointCount(0, text.length()); boolean expected = count >= min && count <= max; TokenStream stream = new KeywordTokenizer(); diff --git 
a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java index b55482a37c5..1d7fae1ed91 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java @@ -30,7 +30,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestLimitTokenCountAnalyzer extends BaseTokenStreamTestCase { @@ -59,7 +59,7 @@ public class TestLimitTokenCountAnalyzer extends BaseTokenStreamTestCase { for (boolean consumeAll : new boolean[] { true, false }) { Directory dir = newDirectory(); - int limit = _TestUtil.nextInt(random(), 50, 101000); + int limit = TestUtil.nextInt(random(), 50, 101000); MockAnalyzer mock = new MockAnalyzer(random()); // if we are consuming all tokens, we can use the checks, diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java index c50d9fa026a..667bedb6ec3 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java @@ -30,10 +30,9 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import 
org.apache.lucene.util.CharsRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import java.io.IOException; -import java.io.Reader; import java.util.Iterator; import java.util.Arrays; @@ -129,7 +128,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase { // some helper methods for the below test with synonyms private String randomNonEmptyString() { while(true) { - final String s = _TestUtil.randomUnicodeString(random()).trim(); + final String s = TestUtil.randomUnicodeString(random()).trim(); if (s.length() != 0 && s.indexOf('\u0000') == -1) { return s; } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java index 94309c739c3..17f2fbbc931 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java @@ -31,7 +31,7 @@ import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter.StemmerOverrideMap; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * @@ -81,7 +81,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase { Map map = new HashMap(); int numTerms = atLeast(50); for (int i = 0; i < numTerms; i++) { - String randomRealisticUnicodeString = _TestUtil + String randomRealisticUnicodeString = TestUtil .randomRealisticUnicodeString(random()); char[] charArray = randomRealisticUnicodeString.toCharArray(); StringBuilder builder = new StringBuilder(); @@ -93,7 +93,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase { j 
+= Character.charCount(cp); } if (builder.length() > 0) { - String value = _TestUtil.randomSimpleString(random()); + String value = TestUtil.randomSimpleString(random()); map.put(builder.toString(), value.isEmpty() ? "a" : value); @@ -124,10 +124,10 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase { Map map = new HashMap(); int numTerms = atLeast(50); for (int i = 0; i < numTerms; i++) { - String randomRealisticUnicodeString = _TestUtil + String randomRealisticUnicodeString = TestUtil .randomRealisticUnicodeString(random()); if (randomRealisticUnicodeString.length() > 0) { - String value = _TestUtil.randomSimpleString(random()); + String value = TestUtil.randomSimpleString(random()); map.put(randomRealisticUnicodeString, value.isEmpty() ? "a" : value); } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java index 72f49399cfb..062bfc16632 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java @@ -18,7 +18,6 @@ package org.apache.lucene.analysis.ngram; */ import java.io.IOException; -import java.io.Reader; import java.io.StringReader; import java.util.Random; @@ -35,7 +34,7 @@ import org.apache.lucene.analysis.shingle.ShingleFilter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests {@link EdgeNGramTokenFilter} for correctness. 
@@ -171,8 +170,8 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { for (int i = 0; i < 10; i++) { - final int min = _TestUtil.nextInt(random(), 2, 10); - final int max = _TestUtil.nextInt(random(), min, 20); + final int min = TestUtil.nextInt(random(), 2, 10); + final int max = TestUtil.nextInt(random(), min, 20); Analyzer a = new Analyzer() { @Override @@ -215,10 +214,10 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase { } public void testSupplementaryCharacters() throws IOException { - final String s = _TestUtil.randomUnicodeString(random(), 10); + final String s = TestUtil.randomUnicodeString(random(), 10); final int codePointCount = s.codePointCount(0, s.length()); - final int minGram = _TestUtil.nextInt(random(), 1, 3); - final int maxGram = _TestUtil.nextInt(random(), minGram, 10); + final int minGram = TestUtil.nextInt(random(), 1, 3); + final int maxGram = TestUtil.nextInt(random(), minGram, 10); TokenStream tk = new KeywordTokenizer(); ((Tokenizer)tk).setReader(new StringReader(s)); tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java index 5ebd608d0a1..99b2fb7a6a6 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java @@ -19,14 +19,13 @@ package org.apache.lucene.analysis.ngram; import java.io.IOException; -import java.io.Reader; import java.io.StringReader; import java.util.Arrays; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; -import 
org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import com.carrotsearch.randomizedtesting.generators.RandomStrings; @@ -101,8 +100,8 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { for (int i = 0; i < 10; i++) { - final int min = _TestUtil.nextInt(random(), 2, 10); - final int max = _TestUtil.nextInt(random(), min, 20); + final int min = TestUtil.nextInt(random(), 2, 10); + final int max = TestUtil.nextInt(random(), min, 20); Analyzer a = new Analyzer() { @Override @@ -141,47 +140,47 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase { public void testLargeInput() throws IOException { // test sliding - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); + final int minGram = TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); } public void testLargeMaxGram() throws IOException { // test sliding with maxGram > 1024 - final int minGram = _TestUtil.nextInt(random(), 1290, 1300); - final int maxGram = _TestUtil.nextInt(random(), minGram, 1300); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); + final int minGram = TestUtil.nextInt(random(), 1290, 1300); + final int maxGram = TestUtil.nextInt(random(), minGram, 1300); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); } public void testPreTokenization() throws IOException { - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "a"); + final int minGram = 
TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "a"); } public void testHeavyPreTokenization() throws IOException { - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef"); + final int minGram = TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef"); } public void testFewTokenChars() throws IOException { - final char[] chrs = new char[_TestUtil.nextInt(random(), 4000, 5000)]; + final char[] chrs = new char[TestUtil.nextInt(random(), 4000, 5000)]; Arrays.fill(chrs, ' '); for (int i = 0; i < chrs.length; ++i) { if (random().nextFloat() < 0.1) { chrs[i] = 'a'; } } - final int minGram = _TestUtil.nextInt(random(), 1, 2); - final int maxGram = _TestUtil.nextInt(random(), minGram, 2); + final int minGram = TestUtil.nextInt(random(), 1, 2); + final int maxGram = TestUtil.nextInt(random(), minGram, 2); testNGrams(minGram, maxGram, new String(chrs), " "); } public void testFullUTF8Range() throws IOException { - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - final String s = _TestUtil.randomUnicodeString(random(), 4 * 1024); + final int minGram = TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + final String s = TestUtil.randomUnicodeString(random(), 4 * 1024); testNGrams(minGram, maxGram, s, ""); testNGrams(minGram, maxGram, s, "abcdef"); } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java index 
d27f9058f0a..811a568e813 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java @@ -28,11 +28,10 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; -import org.apache.lucene.util._TestUtil; import java.io.IOException; -import java.io.Reader; import java.io.StringReader; import java.util.Random; @@ -146,8 +145,8 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { for (int i = 0; i < 10; i++) { - final int min = _TestUtil.nextInt(random(), 2, 10); - final int max = _TestUtil.nextInt(random(), min, 20); + final int min = TestUtil.nextInt(random(), 2, 10); + final int max = TestUtil.nextInt(random(), min, 20); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { @@ -186,10 +185,10 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase { } public void testSupplementaryCharacters() throws IOException { - final String s = _TestUtil.randomUnicodeString(random(), 10); + final String s = TestUtil.randomUnicodeString(random(), 10); final int codePointCount = s.codePointCount(0, s.length()); - final int minGram = _TestUtil.nextInt(random(), 1, 3); - final int maxGram = _TestUtil.nextInt(random(), minGram, 10); + final int minGram = TestUtil.nextInt(random(), 1, 3); + final int maxGram = TestUtil.nextInt(random(), minGram, 10); TokenStream tk = new KeywordTokenizer(); ((Tokenizer)tk).setReader(new StringReader(s)); tk = new 
NGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java index 40481598a03..a7aa2604fb7 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java @@ -18,22 +18,18 @@ package org.apache.lucene.analysis.ngram; */ -import static org.apache.lucene.analysis.ngram.NGramTokenizerTest.isTokenChar; - import java.io.IOException; -import java.io.Reader; import java.io.StringReader; import java.util.Arrays; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; -import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import com.carrotsearch.randomizedtesting.generators.RandomStrings; @@ -115,8 +111,8 @@ public class NGramTokenizerTest extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { for (int i = 0; i < 10; i++) { - final int min = _TestUtil.nextInt(random(), 2, 10); - final int max = _TestUtil.nextInt(random(), min, 20); + final int min = TestUtil.nextInt(random(), 2, 10); + final int max = TestUtil.nextInt(random(), min, 20); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { @@ -205,47 +201,47 @@ public class NGramTokenizerTest extends 
BaseTokenStreamTestCase { public void testLargeInput() throws IOException { // test sliding - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); + final int minGram = TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); } public void testLargeMaxGram() throws IOException { // test sliding with maxGram > 1024 - final int minGram = _TestUtil.nextInt(random(), 1290, 1300); - final int maxGram = _TestUtil.nextInt(random(), minGram, 1300); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); + final int minGram = TestUtil.nextInt(random(), 1290, 1300); + final int maxGram = TestUtil.nextInt(random(), minGram, 1300); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), ""); } public void testPreTokenization() throws IOException { - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "a"); + final int minGram = TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "a"); } public void testHeavyPreTokenization() throws IOException { - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef"); + final int minGram = TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef"); } public void testFewTokenChars() throws 
IOException { - final char[] chrs = new char[_TestUtil.nextInt(random(), 4000, 5000)]; + final char[] chrs = new char[TestUtil.nextInt(random(), 4000, 5000)]; Arrays.fill(chrs, ' '); for (int i = 0; i < chrs.length; ++i) { if (random().nextFloat() < 0.1) { chrs[i] = 'a'; } } - final int minGram = _TestUtil.nextInt(random(), 1, 2); - final int maxGram = _TestUtil.nextInt(random(), minGram, 2); + final int minGram = TestUtil.nextInt(random(), 1, 2); + final int maxGram = TestUtil.nextInt(random(), minGram, 2); testNGrams(minGram, maxGram, new String(chrs), " "); } public void testFullUTF8Range() throws IOException { - final int minGram = _TestUtil.nextInt(random(), 1, 100); - final int maxGram = _TestUtil.nextInt(random(), minGram, 100); - final String s = _TestUtil.randomUnicodeString(random(), 4 * 1024); + final int minGram = TestUtil.nextInt(random(), 1, 100); + final int maxGram = TestUtil.nextInt(random(), minGram, 100); + final String s = TestUtil.randomUnicodeString(random(), 4 * 1024); testNGrams(minGram, maxGram, s, ""); testNGrams(minGram, maxGram, s, "abcdef"); } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java index 12175ede18a..f3cf4b84255 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java @@ -30,7 +30,7 @@ import org.apache.lucene.analysis.CharFilter; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Ignore; /** @@ -302,9 +302,9 @@ public class TestPatternReplaceCharFilter extends BaseTokenStreamTestCase { int numPatterns = 10 + 
random().nextInt(20); Random random = new Random(random().nextLong()); for (int i = 0; i < numPatterns; i++) { - final Pattern p = _TestUtil.randomPattern(random()); + final Pattern p = TestUtil.randomPattern(random()); - final String replacement = _TestUtil.randomSimpleString(random); + final String replacement = TestUtil.randomSimpleString(random); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java index 7fd013e6460..946c902af6b 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java @@ -18,7 +18,6 @@ package org.apache.lucene.analysis.synonym; import java.io.IOException; -import java.io.Reader; import java.io.StringReader; import java.util.ArrayList; import java.util.Arrays; @@ -39,7 +38,7 @@ import org.apache.lucene.analysis.MockGraphTokenFilter; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.tokenattributes.*; import org.apache.lucene.util.CharsRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestSynonymMapFilter extends BaseTokenStreamTestCase { @@ -383,7 +382,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { public void testRandom() throws Exception { - final int alphabetSize = _TestUtil.nextInt(random(), 2, 7); + final int alphabetSize = TestUtil.nextInt(random(), 2, 7); final int docLen = atLeast(3000); //final int docLen = 50; @@ -405,7 +404,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { } b = new SynonymMap.Builder(dedup); for(int synIDX=0;synIDX extends LuceneTestCase { private String createRandomString(Random 
random) { StringBuilder builder = new StringBuilder(); for (int i = 0; i < 20; i++) { - builder.append(_TestUtil.randomSimpleString(random, 5)); + builder.append(TestUtil.randomSimpleString(random, 5)); builder.append(" "); } return builder.toString(); diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java index 6e74cce1a11..84cccb7a94e 100644 --- a/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java +++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java @@ -30,7 +30,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.LuceneTestCase; import org.junit.After; import org.junit.Before; @@ -71,8 +71,8 @@ public class DataSplitterTest extends LuceneTestCase { for (int i = 0; i < 100; i++) { doc = new Document(); doc.add(new Field(idFieldName, Integer.toString(i), ft)); - doc.add(new Field(textFieldName, _TestUtil.randomUnicodeString(rnd, 1024), ft)); - doc.add(new Field(classFieldName, _TestUtil.randomUnicodeString(rnd, 10), ft)); + doc.add(new Field(textFieldName, TestUtil.randomUnicodeString(rnd, 1024), ft)); + doc.add(new Field(classFieldName, TestUtil.randomUnicodeString(rnd, 10), ft)); indexWriter.addDocument(doc, analyzer); } diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java index 1aaeeff9cba..c05cc140689 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java +++ 
b/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java @@ -20,13 +20,13 @@ package org.apache.lucene.codecs.blockterms; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene41ords.Lucene41WithOrds; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Basic tests of a PF using FixedGap terms dictionary */ public class TestFixedGapPostingsFormat extends BasePostingsFormatTestCase { - private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41WithOrds(_TestUtil.nextInt(random(), 1, 1000))); + private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41WithOrds(TestUtil.nextInt(random(), 1, 1000))); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java index 16ae249695c..59608f86a16 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java @@ -20,13 +20,13 @@ package org.apache.lucene.codecs.blockterms; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapFixedInterval; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Basic tests of a PF using VariableGap terms dictionary (fixed interval) */ public class TestVarGapDocFreqIntervalPostingsFormat extends BasePostingsFormatTestCase { - private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41VarGapFixedInterval(_TestUtil.nextInt(random(), 1, 1000))); + private final Codec codec = 
TestUtil.alwaysPostingsFormat(new Lucene41VarGapFixedInterval(TestUtil.nextInt(random(), 1, 1000))); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java index 4ae298d5a8b..d0935a1c33b 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java @@ -20,13 +20,13 @@ package org.apache.lucene.codecs.blockterms; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapDocFreqInterval; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Basic tests of a PF using VariableGap terms dictionary (fixed interval, docFreq threshold) */ public class TestVarGapFixedIntervalPostingsFormat extends BasePostingsFormatTestCase { - private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41VarGapDocFreqInterval(_TestUtil.nextInt(random(), 1, 100), _TestUtil.nextInt(random(), 1, 1000))); + private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41VarGapDocFreqInterval(TestUtil.nextInt(random(), 1, 100), TestUtil.nextInt(random(), 1, 1000))); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/bloom/TestBloomPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/bloom/TestBloomPostingsFormat.java index ceff6b9f974..b844d6f83e0 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/bloom/TestBloomPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/bloom/TestBloomPostingsFormat.java @@ -19,13 +19,13 @@ package org.apache.lucene.codecs.bloom; import 
org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Basic tests for BloomPostingsFormat */ public class TestBloomPostingsFormat extends BasePostingsFormatTestCase { - private final Codec codec = _TestUtil.alwaysPostingsFormat(new TestBloomFilteredLucene41Postings()); + private final Codec codec = TestUtil.alwaysPostingsFormat(new TestBloomFilteredLucene41Postings()); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/diskdv/TestDiskDocValuesFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/diskdv/TestDiskDocValuesFormat.java index 1761e4fab0b..b6cff4410da 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/diskdv/TestDiskDocValuesFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/diskdv/TestDiskDocValuesFormat.java @@ -19,13 +19,13 @@ package org.apache.lucene.codecs.diskdv; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests DiskDocValuesFormat */ public class TestDiskDocValuesFormat extends BaseCompressingDocValuesFormatTestCase { - private final Codec codec = _TestUtil.alwaysDocValuesFormat(new DiskDocValuesFormat()); + private final Codec codec = TestUtil.alwaysDocValuesFormat(new DiskDocValuesFormat()); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestFixedIntBlockPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestFixedIntBlockPostingsFormat.java index 0793e75f25c..2bf392ce131 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestFixedIntBlockPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestFixedIntBlockPostingsFormat.java @@ -20,14 +20,14 @@ package 
org.apache.lucene.codecs.intblock; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.mockintblock.MockFixedIntBlockPostingsFormat; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Basic tests for FixedIntBlock */ public class TestFixedIntBlockPostingsFormat extends BasePostingsFormatTestCase { // TODO: randomize blocksize - private final Codec codec = _TestUtil.alwaysPostingsFormat(new MockFixedIntBlockPostingsFormat()); + private final Codec codec = TestUtil.alwaysPostingsFormat(new MockFixedIntBlockPostingsFormat()); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestVariableIntBlockPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestVariableIntBlockPostingsFormat.java index 8671fe74778..201537dbc63 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestVariableIntBlockPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/intblock/TestVariableIntBlockPostingsFormat.java @@ -20,14 +20,14 @@ package org.apache.lucene.codecs.intblock; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.mockintblock.MockVariableIntBlockPostingsFormat; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Basic tests for VariableIntBlock */ public class TestVariableIntBlockPostingsFormat extends BasePostingsFormatTestCase { // TODO: randomize blocksize - private final Codec codec = _TestUtil.alwaysPostingsFormat( new MockVariableIntBlockPostingsFormat()); + private final Codec codec = TestUtil.alwaysPostingsFormat(new MockVariableIntBlockPostingsFormat()); @Override protected Codec getCodec() { diff --git 
a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectDocValuesFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectDocValuesFormat.java index fa6215284bb..1073c1107e7 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectDocValuesFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectDocValuesFormat.java @@ -19,13 +19,13 @@ package org.apache.lucene.codecs.memory; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BaseDocValuesFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests DirectDocValuesFormat */ public class TestDirectDocValuesFormat extends BaseDocValuesFormatTestCase { - private final Codec codec = _TestUtil.alwaysDocValuesFormat(new DirectDocValuesFormat()); + private final Codec codec = TestUtil.alwaysDocValuesFormat(new DirectDocValuesFormat()); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java index 85b42901160..2b785e47400 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java @@ -19,14 +19,14 @@ package org.apache.lucene.codecs.memory; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests DirectPostingsFormat */ public class TestDirectPostingsFormat extends BasePostingsFormatTestCase { // TODO: randomize parameters - private final Codec codec = _TestUtil.alwaysPostingsFormat(new DirectPostingsFormat()); + private final Codec codec = TestUtil.alwaysPostingsFormat(new DirectPostingsFormat()); @Override 
protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryDocValuesFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryDocValuesFormat.java index 77c6ea582a8..81b2367f399 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryDocValuesFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryDocValuesFormat.java @@ -19,13 +19,13 @@ package org.apache.lucene.codecs.memory; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests MemoryDocValuesFormat */ public class TestMemoryDocValuesFormat extends BaseCompressingDocValuesFormatTestCase { - private final Codec codec = _TestUtil.alwaysDocValuesFormat(new MemoryDocValuesFormat()); + private final Codec codec = TestUtil.alwaysDocValuesFormat(new MemoryDocValuesFormat()); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryPostingsFormat.java index da9e3c83c8a..524f9704d55 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestMemoryPostingsFormat.java @@ -19,14 +19,14 @@ package org.apache.lucene.codecs.memory; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests MemoryPostingsFormat */ public class TestMemoryPostingsFormat extends BasePostingsFormatTestCase { // TODO: randomize doPack - private final Codec codec = 
TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat()); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java index 4c07479b81b..eaf96396b98 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java @@ -37,9 +37,8 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.BaseDirectoryWrapper; -import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Pulses 10k terms/docs, @@ -51,9 +50,9 @@ import org.apache.lucene.util._TestUtil; public class Test10KPulsings extends LuceneTestCase { public void test10kPulsed() throws Exception { // we always run this test with pulsing codec. 
- Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1)); + Codec cp = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1)); - File f = _TestUtil.getTempDir("10kpulsed"); + File f = TestUtil.getTempDir("10kpulsed"); BaseDirectoryWrapper dir = newFSDirectory(f); dir.setCheckIndexOnClose(false); // we do this ourselves explicitly RandomIndexWriter iw = new RandomIndexWriter(random(), dir, @@ -62,7 +61,7 @@ public class Test10KPulsings extends LuceneTestCase { Document document = new Document(); FieldType ft = new FieldType(TextField.TYPE_STORED); - switch(_TestUtil.nextInt(random(), 0, 2)) { + switch(TestUtil.nextInt(random(), 0, 2)) { case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break; case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break; default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break; @@ -87,13 +86,13 @@ public class Test10KPulsings extends LuceneTestCase { for (int i = 0; i < 10050; i++) { String expected = df.format(i); assertEquals(expected, te.next().utf8ToString()); - de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); + de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc()); } ir.close(); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); dir.close(); } @@ -101,10 +100,10 @@ public class Test10KPulsings extends LuceneTestCase { */ public void test10kNotPulsed() throws Exception { // we always run this test with pulsing codec. 
- int freqCutoff = _TestUtil.nextInt(random(), 1, 10); - Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(freqCutoff)); + int freqCutoff = TestUtil.nextInt(random(), 1, 10); + Codec cp = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(freqCutoff)); - File f = _TestUtil.getTempDir("10knotpulsed"); + File f = TestUtil.getTempDir("10knotpulsed"); BaseDirectoryWrapper dir = newFSDirectory(f); dir.setCheckIndexOnClose(false); // we do this ourselves explicitly RandomIndexWriter iw = new RandomIndexWriter(random(), dir, @@ -113,7 +112,7 @@ public class Test10KPulsings extends LuceneTestCase { Document document = new Document(); FieldType ft = new FieldType(TextField.TYPE_STORED); - switch(_TestUtil.nextInt(random(), 0, 2)) { + switch(TestUtil.nextInt(random(), 0, 2)) { case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break; case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break; default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break; @@ -145,13 +144,13 @@ public class Test10KPulsings extends LuceneTestCase { for (int i = 0; i < 10050; i++) { String expected = df.format(i); assertEquals(expected, te.next().utf8ToString()); - de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); + de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc()); } ir.close(); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); dir.close(); } } diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingPostingsFormat.java index cf8ab1df0b4..440ac38d9c4 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingPostingsFormat.java @@ -19,14 +19,14 @@ package 
org.apache.lucene.codecs.pulsing; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests PulsingPostingsFormat */ public class TestPulsingPostingsFormat extends BasePostingsFormatTestCase { // TODO: randomize cutoff - private final Codec codec = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat()); + private final Codec codec = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat()); @Override protected Codec getCodec() { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java index d849951bff6..a0e3282e3bb 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java @@ -27,7 +27,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.TextField; import org.apache.lucene.index.AtomicReader; -import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; @@ -36,7 +35,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests that pulsing codec reuses its enums and wrapped enums @@ -45,7 +44,7 @@ public class TestPulsingReuse extends LuceneTestCase { // TODO: this is a basic test. this thing is complicated, add more public void testSophisticatedReuse() throws Exception { // we always run this test with pulsing codec. 
- Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1)); + Codec cp = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1)); Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); @@ -83,7 +82,7 @@ public class TestPulsingReuse extends LuceneTestCase { /** tests reuse with Pulsing1(Pulsing2(Standard)) */ public void testNestedPulsing() throws Exception { // we always run this test with pulsing codec. - Codec cp = _TestUtil.alwaysPostingsFormat(new NestedPulsingPostingsFormat()); + Codec cp = TestUtil.alwaysPostingsFormat(new NestedPulsingPostingsFormat()); BaseDirectoryWrapper dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/sep/TestSepPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/sep/TestSepPostingsFormat.java index 64dd6618d5e..e49e1894a03 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/sep/TestSepPostingsFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/sep/TestSepPostingsFormat.java @@ -20,14 +20,14 @@ package org.apache.lucene.codecs.sep; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.mocksep.MockSepPostingsFormat; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests sep layout */ public class TestSepPostingsFormat extends BasePostingsFormatTestCase { // TODO: randomize cutoff - private final Codec codec = _TestUtil.alwaysPostingsFormat(new MockSepPostingsFormat()); + private final Codec codec = TestUtil.alwaysPostingsFormat(new MockSepPostingsFormat()); @Override protected Codec getCodec() { diff --git 
a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java index a1197237d2f..39764164410 100644 --- a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java +++ b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java @@ -33,7 +33,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.AutomatonTestUtil; import org.apache.lucene.util.automaton.BasicAutomata; @@ -232,7 +232,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { for (int i = 0; i < iters; i++) { final CharacterRunAutomaton dfa = new CharacterRunAutomaton(AutomatonTestUtil.randomAutomaton(random())); final boolean lowercase = random().nextBoolean(); - final int limit = _TestUtil.nextInt(random(), 0, 500); + final int limit = TestUtil.nextInt(random(), 0, 500); Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { @@ -248,7 +248,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { public void testForwardOffsets() throws Exception { int num = atLeast(10000); for (int i = 0; i < num; i++) { - String s = _TestUtil.randomHtmlishString(random(), 20); + String s = TestUtil.randomHtmlishString(random(), 20); StringReader reader = new StringReader(s); MockCharFilter charfilter = new MockCharFilter(reader, 2); MockAnalyzer analyzer = new MockAnalyzer(random()); diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestToken.java b/lucene/core/src/test/org/apache/lucene/analysis/TestToken.java index 7567198bad6..acd3e4f93cf 100644 --- a/lucene/core/src/test/org/apache/lucene/analysis/TestToken.java +++ 
b/lucene/core/src/test/org/apache/lucene/analysis/TestToken.java @@ -22,7 +22,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.Attribute; import org.apache.lucene.util.AttributeImpl; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import java.io.StringReader; import java.util.HashMap; @@ -246,17 +246,17 @@ public class TestToken extends LuceneTestCase { public void testAttributeReflection() throws Exception { Token t = new Token("foobar", 6, 22, 8); - _TestUtil.assertAttributeReflection(t, - new HashMap() {{ - put(CharTermAttribute.class.getName() + "#term", "foobar"); - put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar")); - put(OffsetAttribute.class.getName() + "#startOffset", 6); - put(OffsetAttribute.class.getName() + "#endOffset", 22); - put(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1); - put(PayloadAttribute.class.getName() + "#payload", null); - put(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE); - put(FlagsAttribute.class.getName() + "#flags", 8); - }}); + TestUtil.assertAttributeReflection(t, + new HashMap() {{ + put(CharTermAttribute.class.getName() + "#term", "foobar"); + put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar")); + put(OffsetAttribute.class.getName() + "#startOffset", 6); + put(OffsetAttribute.class.getName() + "#endOffset", 22); + put(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1); + put(PayloadAttribute.class.getName() + "#payload", null); + put(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE); + put(FlagsAttribute.class.getName() + "#flags", 8); + }}); } diff --git a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java index f1cfafee79f..2562201d8d3 100644 
--- a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java +++ b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java @@ -20,7 +20,8 @@ package org.apache.lucene.analysis.tokenattributes; import org.apache.lucene.analysis.TestToken; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; + import java.nio.CharBuffer; import java.util.HashMap; import java.util.Formatter; @@ -132,7 +133,7 @@ public class TestCharTermAttributeImpl extends LuceneTestCase { public void testAttributeReflection() throws Exception { CharTermAttributeImpl t = new CharTermAttributeImpl(); t.append("foobar"); - _TestUtil.assertAttributeReflection(t, new HashMap() {{ + TestUtil.assertAttributeReflection(t, new HashMap() {{ put(CharTermAttribute.class.getName() + "#term", "foobar"); put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar")); }}); diff --git a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpl.java b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpl.java index 79ffe9a5e31..7d547ea35f0 100644 --- a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpl.java +++ b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpl.java @@ -17,7 +17,7 @@ package org.apache.lucene.analysis.tokenattributes; * limitations under the License. 
*/ -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.LuceneTestCase; import java.util.Collections; @@ -27,21 +27,21 @@ public class TestSimpleAttributeImpl extends LuceneTestCase { // this checks using reflection API if the defaults are correct public void testAttributes() { - _TestUtil.assertAttributeReflection(new PositionIncrementAttributeImpl(), - Collections.singletonMap(PositionIncrementAttribute.class.getName()+"#positionIncrement", 1)); - _TestUtil.assertAttributeReflection(new PositionLengthAttributeImpl(), - Collections.singletonMap(PositionLengthAttribute.class.getName()+"#positionLength", 1)); - _TestUtil.assertAttributeReflection(new FlagsAttributeImpl(), - Collections.singletonMap(FlagsAttribute.class.getName()+"#flags", 0)); - _TestUtil.assertAttributeReflection(new TypeAttributeImpl(), - Collections.singletonMap(TypeAttribute.class.getName()+"#type", TypeAttribute.DEFAULT_TYPE)); - _TestUtil.assertAttributeReflection(new PayloadAttributeImpl(), - Collections.singletonMap(PayloadAttribute.class.getName()+"#payload", null)); - _TestUtil.assertAttributeReflection(new KeywordAttributeImpl(), - Collections.singletonMap(KeywordAttribute.class.getName()+"#keyword", false)); - _TestUtil.assertAttributeReflection(new OffsetAttributeImpl(), new HashMap() {{ - put(OffsetAttribute.class.getName()+"#startOffset", 0); - put(OffsetAttribute.class.getName()+"#endOffset", 0); + TestUtil.assertAttributeReflection(new PositionIncrementAttributeImpl(), + Collections.singletonMap(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1)); + TestUtil.assertAttributeReflection(new PositionLengthAttributeImpl(), + Collections.singletonMap(PositionLengthAttribute.class.getName() + "#positionLength", 1)); + TestUtil.assertAttributeReflection(new FlagsAttributeImpl(), + Collections.singletonMap(FlagsAttribute.class.getName() + "#flags", 0)); + TestUtil.assertAttributeReflection(new TypeAttributeImpl(), 
+ Collections.singletonMap(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE)); + TestUtil.assertAttributeReflection(new PayloadAttributeImpl(), + Collections.singletonMap(PayloadAttribute.class.getName() + "#payload", null)); + TestUtil.assertAttributeReflection(new KeywordAttributeImpl(), + Collections.singletonMap(KeywordAttribute.class.getName() + "#keyword", false)); + TestUtil.assertAttributeReflection(new OffsetAttributeImpl(), new HashMap() {{ + put(OffsetAttribute.class.getName() + "#startOffset", 0); + put(OffsetAttribute.class.getName() + "#endOffset", 0); }}); } diff --git a/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java b/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java index 33fbfb59e65..305bf38741f 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java @@ -24,7 +24,7 @@ import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import com.carrotsearch.randomizedtesting.generators.RandomInts; @@ -85,8 +85,8 @@ public abstract class AbstractTestCompressionMode extends LuceneTestCase { final int iterations = atLeast(10); for (int i = 0; i < iterations; ++i) { final byte[] decompressed = randomArray(); - final int off = random().nextBoolean() ? 0 : _TestUtil.nextInt(random(), 0, decompressed.length); - final int len = random().nextBoolean() ? decompressed.length - off : _TestUtil.nextInt(random(), 0, decompressed.length - off); + final int off = random().nextBoolean() ? 0 : TestUtil.nextInt(random(), 0, decompressed.length); + final int len = random().nextBoolean() ? 
decompressed.length - off : TestUtil.nextInt(random(), 0, decompressed.length - off); final byte[] compressed = compress(decompressed, off, len); final byte[] restored = decompress(compressed, len); assertArrayEquals(Arrays.copyOfRange(decompressed, off, off+len), restored); @@ -138,7 +138,7 @@ public abstract class AbstractTestCompressionMode extends LuceneTestCase { } public void testConstant() throws IOException { - final byte[] decompressed = new byte[_TestUtil.nextInt(random(), 1, 10000)]; + final byte[] decompressed = new byte[TestUtil.nextInt(random(), 1, 10000)]; Arrays.fill(decompressed, (byte) random().nextInt()); test(decompressed); } diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestBitVector.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestBitVector.java index f8e294c321c..7e09e86a335 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestBitVector.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestBitVector.java @@ -23,7 +23,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * TestBitVector tests the BitVector, obviously. 
@@ -231,10 +231,10 @@ public class TestBitVector extends LuceneTestCase public void testClearedBitNearEnd() throws IOException { Directory d = newDirectory(); - final int numBits = _TestUtil.nextInt(random(), 7, 1000); + final int numBits = TestUtil.nextInt(random(), 7, 1000); BitVector bv = new BitVector(numBits); bv.invertAll(); - bv.clear(numBits-_TestUtil.nextInt(random(), 1, 7)); + bv.clear(numBits-TestUtil.nextInt(random(), 1, 7)); bv.write(d, "test", newIOContext(random())); assertEquals(numBits-1, bv.count()); d.close(); @@ -242,7 +242,7 @@ public class TestBitVector extends LuceneTestCase public void testMostlySet() throws IOException { Directory d = newDirectory(); - final int numBits = _TestUtil.nextInt(random(), 30, 1000); + final int numBits = TestUtil.nextInt(random(), 30, 1000); for(int numClear=0;numClear<20;numClear++) { BitVector bv = new BitVector(numBits); bv.invertAll(); diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java index fd97279391b..a18d6cdba10 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java @@ -33,7 +33,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.BeforeClass; public class TestLucene40PostingsReader extends LuceneTestCase { @@ -53,7 +53,7 @@ public class TestLucene40PostingsReader extends LuceneTestCase { * depends heavily on term vectors cross-check at checkIndex */ public void testPostings() throws Exception { - Directory dir = newFSDirectory(_TestUtil.getTempDir("postings")); + Directory dir = 
newFSDirectory(TestUtil.getTempDir("postings")); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); iwc.setCodec(Codec.forName("Lucene40")); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); @@ -116,7 +116,7 @@ public class TestLucene40PostingsReader extends LuceneTestCase { StringBuilder sb = new StringBuilder(); int i = random().nextInt(terms.length); while (i < terms.length) { - int tf = _TestUtil.nextInt(random(), 1, maxTF); + int tf = TestUtil.nextInt(random(), 1, maxTF); for (int j = 0; j < tf; j++) { shuffled.add(terms[i]); } diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java index 3c0cc760257..2a8aada85e8 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java @@ -36,7 +36,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.BeforeClass; // TODO: really this should be in BaseTestPF or somewhere else? useful test! 
@@ -49,7 +49,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { public void testReuseDocsEnumNoReuse() throws IOException { Directory dir = newDirectory(); - Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat()); + Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); int numdocs = atLeast(20); @@ -76,7 +76,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { // tests for reuse only if bits are the same either null or the same instance public void testReuseDocsEnumSameBitsOrNull() throws IOException { Directory dir = newDirectory(); - Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat()); + Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); int numdocs = atLeast(20); @@ -120,7 +120,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { // make sure we never reuse from another reader even if it is the same field & codec etc public void testReuseDocsEnumDifferentReader() throws IOException { Directory dir = newDirectory(); - Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat()); + Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat()); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp)); int numdocs = atLeast(20); diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat.java index 1aa28360c0b..77ae0dfc099 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat.java +++ 
b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat.java @@ -19,13 +19,13 @@ package org.apache.lucene.codecs.lucene41; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests BlockPostingsFormat */ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase { - private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()); + private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()); @Override protected Codec getCodec() { diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat2.java index 7f8287b89f2..f336bd6a26f 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat2.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat2.java @@ -30,7 +30,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests special cases of BlockPostingsFormat @@ -43,9 +43,9 @@ public class TestBlockPostingsFormat2 extends LuceneTestCase { @Override public void setUp() throws Exception { super.setUp(); - dir = newFSDirectory(_TestUtil.getTempDir("testDFBlockSize")); + dir = newFSDirectory(TestUtil.getTempDir("testDFBlockSize")); iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); - iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())); + iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())); iw = new RandomIndexWriter(random(), dir, iwc.clone()); 
iw.setDoRandomForceMerge(false); // we will ourselves } @@ -53,7 +53,7 @@ @Override public void tearDown() throws Exception { iw.close(); - _TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge + TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge iwc.setOpenMode(OpenMode.APPEND); IndexWriter iw = new IndexWriter(dir, iwc.clone()); iw.forceMerge(1); diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java index 844ff2cb48c..c4592e9b3a4 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java @@ -17,7 +17,6 @@ package org.apache.lucene.codecs.lucene41; * limitations under the License. */ -import java.io.Reader; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -53,7 +52,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.English; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.AutomatonTestUtil; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.RegExp; @@ -83,7 +82,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase { } }; IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); - iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())); + iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())); // TODO we could actually add more fields implemented with different PFs // or, just put this test into the 
usual rotation? RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc.clone()); @@ -123,7 +122,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase { doc.add(field7); doc.add(field8); for (int i = 0; i < MAXDOC; i++) { - String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ') + " " + _TestUtil.randomSimpleString(random()); + String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ') + " " + TestUtil.randomSimpleString(random()); field1.setStringValue(stringValue); field2.setStringValue(stringValue); field3.setStringValue(stringValue); @@ -136,7 +135,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase { } iw.close(); verify(dir); - _TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge + TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge iwc.setOpenMode(OpenMode.APPEND); IndexWriter iw2 = new IndexWriter(dir, iwc.clone()); iw2.forceMerge(1); diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene45/TestLucene45DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene45/TestLucene45DocValuesFormat.java index 3f6171acd62..ad51a936cf4 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene45/TestLucene45DocValuesFormat.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene45/TestLucene45DocValuesFormat.java @@ -19,13 +19,13 @@ package org.apache.lucene.codecs.lucene45; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests Lucene45DocValuesFormat */ public class TestLucene45DocValuesFormat extends BaseCompressingDocValuesFormatTestCase { - private final Codec codec = _TestUtil.alwaysDocValuesFormat(new Lucene45DocValuesFormat()); + private final Codec codec = 
TestUtil.alwaysDocValuesFormat(new Lucene45DocValuesFormat()); @Override protected Codec getCodec() { diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java index 3bc908ccfad..76f67d4a8f2 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java @@ -46,7 +46,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Basic tests of PerFieldDocValuesFormat @@ -67,7+67,7 @@ public class TestPerFieldDocValuesFormat extends BaseDocValuesFormatTestCase { @Override protected boolean codecAcceptsHugeBinaryValues(String field) { - return _TestUtil.fieldSupportsHugeBinaryDocValues(field); + return TestUtil.fieldSupportsHugeBinaryDocValues(field); } // just a simple trivial test diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java index 0d92b7c2c12..050770a486f 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java @@ -43,7 +43,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; /** @@ -107,7 +107,7 @@ public class 
TestPerFieldPostingsFormat2 extends LuceneTestCase { addDocs2(writer, 10); writer.commit(); assertEquals(30, writer.maxDoc()); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); writer.forceMerge(1); assertEquals(30, writer.maxDoc()); writer.close(); @@ -240,7 +240,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase { final int docsPerRound = 97; int numRounds = atLeast(1); for (int i = 0; i < numRounds; i++) { - int num = _TestUtil.nextInt(random(), 30, 60); + int num = TestUtil.nextInt(random(), 30, 60); IndexWriterConfig config = newIndexWriterConfig(random(), TEST_VERSION_CURRENT, new MockAnalyzer(random())); config.setOpenMode(OpenMode.CREATE_OR_APPEND); @@ -251,7 +251,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase { FieldType customType = new FieldType(TextField.TYPE_NOT_STORED); customType.setTokenized(random().nextBoolean()); customType.setOmitNorms(random().nextBoolean()); - Field field = newField("" + k, _TestUtil + Field field = newField("" + k, TestUtil .randomRealisticUnicodeString(random(), 128), customType); doc.add(field); } diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java index 6ad9c63a437..074144624ab 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java @@ -26,8 +26,8 @@ import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import org.junit.Ignore; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; @@ -38,7 +38,7 @@ public class Test2BBinaryDocValues extends LuceneTestCase { // indexes Integer.MAX_VALUE docs with a 
fixed binary field public void testFixedBinary() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BFixedBinary")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BFixedBinary")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } @@ -98,7 +98,7 @@ public class Test2BBinaryDocValues extends LuceneTestCase { // indexes Integer.MAX_VALUE docs with a variable binary field public void testVariableBinary() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BVariableBinary")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BVariableBinary")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java b/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java index e529a05ba67..f2985210fab 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java @@ -22,7 +22,7 @@ import java.util.Arrays; import org.apache.lucene.document.Document; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -31,7 +31,7 @@ public class Test2BDocs extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { - dir = newFSDirectory(_TestUtil.getTempDir("2Bdocs")); + dir = newFSDirectory(TestUtil.getTempDir("2Bdocs")); IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, null)); Document doc = new Document(); for (int i = 0; i < 262144; i++) { @@ -61,7 +61,7 @@ public class Test2BDocs extends LuceneTestCase { } public void testExactlyAtLimit() throws Exception 
{ - Directory dir2 = newFSDirectory(_TestUtil.getTempDir("2BDocs2")); + Directory dir2 = newFSDirectory(TestUtil.getTempDir("2BDocs2")); IndexWriter iw = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, null)); Document doc = new Document(); for (int i = 0; i < 262143; i++) { diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java index 1ac17a2cf03..f83ef7a78f0 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java @@ -23,8 +23,8 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import org.junit.Ignore; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; @@ -35,7 +35,7 @@ public class Test2BNumericDocValues extends LuceneTestCase { // indexes Integer.MAX_VALUE docs with an increasing dv field public void testNumerics() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BNumerics")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BNumerics")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java index 1426c8290be..7b9951338e5 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java @@ -28,8 +28,8 @@ import org.apache.lucene.document.TextField; import 
org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.junit.Ignore; @@ -46,7 +46,7 @@ public class Test2BPositions extends LuceneTestCase { // uses lots of space and takes a few minutes @Ignore("Very slow. Enable manually by removing @Ignore.") public void test() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPositions")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BPositions")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java index 5825eb89c6d..8fad986e2ac 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java @@ -29,8 +29,8 @@ import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; @@ -44,7 +44,7 @@ public class Test2BPostings extends LuceneTestCase { @Nightly public void test() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPostings")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BPostings")); if (dir instanceof 
MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java index 3332bb67828..71ffeae71c1 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java @@ -30,8 +30,8 @@ import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.junit.Ignore; @@ -50,7 +50,7 @@ public class Test2BPostingsBytes extends LuceneTestCase { // with some codecs needs more heap space as well. @Ignore("Very slow. 
Enable manually by removing @Ignore.") public void test() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPostingsBytes1")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BPostingsBytes1")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } @@ -93,7 +93,7 @@ public class Test2BPostingsBytes extends LuceneTestCase { IndexReader subReaders[] = new IndexReader[1000]; Arrays.fill(subReaders, oneThousand); MultiReader mr = new MultiReader(subReaders); - BaseDirectoryWrapper dir2 = newFSDirectory(_TestUtil.getTempDir("2BPostingsBytes2")); + BaseDirectoryWrapper dir2 = newFSDirectory(TestUtil.getTempDir("2BPostingsBytes2")); if (dir2 instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir2).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } @@ -108,7 +108,7 @@ public class Test2BPostingsBytes extends LuceneTestCase { subReaders = new IndexReader[2000]; Arrays.fill(subReaders, oneMillion); mr = new MultiReader(subReaders); - BaseDirectoryWrapper dir3 = newFSDirectory(_TestUtil.getTempDir("2BPostingsBytes3")); + BaseDirectoryWrapper dir3 = newFSDirectory(TestUtil.getTempDir("2BPostingsBytes3")); if (dir3 instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir3).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java index 41f803c6710..b7c7a0f0dfb 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java @@ -26,8 +26,8 @@ import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import 
org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import org.junit.Ignore; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; @@ -38,7 +38,7 @@ public class Test2BSortedDocValues extends LuceneTestCase { // indexes Integer.MAX_VALUE docs with a fixed binary field public void testFixedSorted() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BFixedSorted")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BFixedSorted")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } @@ -95,7 +95,7 @@ public class Test2BSortedDocValues extends LuceneTestCase { // indexes Integer.MAX_VALUE docs with a fixed binary field // TODO: must use random.nextBytes (like Test2BTerms) to avoid BytesRefHash probing issues public void test2BOrds() throws Exception { - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BOrds")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BOrds")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); } diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java index a9601d202dd..b4249d0dc29 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java @@ -62,7 +62,7 @@ public class Test2BTerms extends LuceneTestCase { addAttribute(TermToBytesRefAttribute.class); bytes.length = TOKEN_LEN; this.random = random; - nextSave = _TestUtil.nextInt(random, 500000, 1000000); + nextSave = TestUtil.nextInt(random, 500000, 1000000); } @Override @@ -75,7 +75,7 @@ public class Test2BTerms extends LuceneTestCase { if (--nextSave == 0) { savedTerms.add(BytesRef.deepCopyOf(bytes)); System.out.println("TEST: save term=" + bytes); - nextSave = 
_TestUtil.nextInt(random, 500000, 1000000); + nextSave = TestUtil.nextInt(random, 500000, 1000000); } return true; } @@ -144,11 +144,11 @@ public class Test2BTerms extends LuceneTestCase { System.out.println("Starting Test2B"); final long TERM_COUNT = ((long) Integer.MAX_VALUE) + 100000000; - final int TERMS_PER_DOC = _TestUtil.nextInt(random(), 100000, 1000000); + final int TERMS_PER_DOC = TestUtil.nextInt(random(), 100000, 1000000); List savedTerms = null; - BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BTerms")); + BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BTerms")); //MockDirectoryWrapper dir = newFSDirectory(new File("/p/lucene/indices/2bindex")); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER); @@ -212,7 +212,7 @@ public class Test2BTerms extends LuceneTestCase { r.close(); System.out.println("TEST: now CheckIndex..."); - CheckIndex.Status status = _TestUtil.checkIndex(dir); + CheckIndex.Status status = TestUtil.checkIndex(dir); final long tc = status.segmentInfos.get(0).termIndexStatus.termCount; assertTrue("count " + tc + " is not > " + Integer.MAX_VALUE, tc > Integer.MAX_VALUE); @@ -224,13 +224,13 @@ public class Test2BTerms extends LuceneTestCase { System.out.println("TEST: findTerms"); final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null); final List savedTerms = new ArrayList(); - int nextSave = _TestUtil.nextInt(random(), 500000, 1000000); + int nextSave = TestUtil.nextInt(random(), 500000, 1000000); BytesRef term; while((term = termsEnum.next()) != null) { if (--nextSave == 0) { savedTerms.add(BytesRef.deepCopyOf(term)); System.out.println("TEST: add " + term); - nextSave = _TestUtil.nextInt(random(), 500000, 1000000); + nextSave = TestUtil.nextInt(random(), 500000, 1000000); } } return savedTerms; diff --git a/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java 
b/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java index 6bca06beb2b..14b5ba56178 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java @@ -25,8 +25,8 @@ import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; @@ -41,7 +41,7 @@ public class Test4GBStoredFields extends LuceneTestCase { @Nightly public void test() throws Exception { - MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new MMapDirectory(_TestUtil.getTempDir("4GBStoredFields"))); + MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new MMapDirectory(TestUtil.getTempDir("4GBStoredFields"))); dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER); IndexWriter w = new IndexWriter(dir, diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java index 0a5a7c6f066..31aeac89ae2 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -46,7 +46,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestAddIndexes extends LuceneTestCase { @@ -66,7 +66,7 @@ public class TestAddIndexes extends LuceneTestCase { addDocs(writer, 100); assertEquals(100, 
writer.maxDoc()); writer.close(); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); writer = newWriter( aux, @@ -91,7 +91,7 @@ public class TestAddIndexes extends LuceneTestCase { writer.addIndexes(aux, aux2); assertEquals(190, writer.maxDoc()); writer.close(); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); // make sure the old index is correct verifyNumDocs(aux, 40); @@ -540,7 +540,7 @@ public class TestAddIndexes extends LuceneTestCase { private void verifyTermDocs(Directory dir, Term term, int numDocs) throws IOException { IndexReader reader = DirectoryReader.open(dir); - DocsEnum docsEnum = _TestUtil.docs(random(), reader, term.field, term.bytes, null, null, DocsEnum.FLAG_NONE); + DocsEnum docsEnum = TestUtil.docs(random(), reader, term.field, term.bytes, null, null, DocsEnum.FLAG_NONE); int count = 0; while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) count++; @@ -915,7 +915,7 @@ public class TestAddIndexes extends LuceneTestCase { CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY); c.launchThreads(-1); - Thread.sleep(_TestUtil.nextInt(random(), 10, 500)); + Thread.sleep(TestUtil.nextInt(random(), 10, 500)); // Close w/o first stopping/joining the threads if (VERBOSE) { @@ -940,7 +940,7 @@ public class TestAddIndexes extends LuceneTestCase { CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY); c.launchThreads(-1); - Thread.sleep(_TestUtil.nextInt(random(), 10, 500)); + Thread.sleep(TestUtil.nextInt(random(), 10, 500)); // Close w/o first stopping/joining the threads if (VERBOSE) { @@ -1016,7 +1016,7 @@ public class TestAddIndexes extends LuceneTestCase { assertEquals(100, writer.maxDoc()); writer.commit(); writer.close(); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); writer = newWriter( aux, @@ -1141,7 +1141,7 @@ public class TestAddIndexes extends LuceneTestCase { Directory dir = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); - 
conf.setCodec(_TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1 + random().nextInt(20)))); + conf.setCodec(TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1 + random().nextInt(20)))); IndexWriter w = new IndexWriter(dir, conf); try { w.addIndexes(toAdd); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java index 30b922cfb57..defea12a415 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java @@ -29,7 +29,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Test that a plain default puts codec headers in all files. @@ -49,7 +49,7 @@ public class TestAllFilesHaveCodecHeader extends LuceneTestCase { doc.add(bodyField); for (int i = 0; i < 100; i++) { idField.setStringValue(Integer.toString(i)); - bodyField.setStringValue(_TestUtil.randomUnicodeString(random())); + bodyField.setStringValue(TestUtil.randomUnicodeString(random())); riw.addDocument(doc); if (random().nextInt(7) == 0) { riw.commit(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java index 644477e020c..e94667cd40d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java @@ -17,7 +17,6 @@ package org.apache.lucene.index; */ import java.io.File; -import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.*; @@ -176,10 +175,10 @@ public class TestAtomicUpdate extends LuceneTestCase { directory.close(); // 
Second in an FSDirectory: - File dirPath = _TestUtil.getTempDir("lucene.test.atomic"); + File dirPath = TestUtil.getTempDir("lucene.test.atomic"); directory = newFSDirectory(dirPath); runTest(directory); directory.close(); - _TestUtil.rmDir(dirPath); + TestUtil.rmDir(dirPath); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java index a46ef934654..91d6c61a5f9 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java @@ -63,7 +63,7 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.StringHelper; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -205,7 +205,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { */ private static IndexUpgrader newIndexUpgrader(Directory dir) { final boolean streamType = random().nextBoolean(); - final int choice = _TestUtil.nextInt(random(), 0, 2); + final int choice = TestUtil.nextInt(random(), 0, 2); switch (choice) { case 0: return new IndexUpgrader(dir, TEST_VERSION_CURRENT); case 1: return new IndexUpgrader(dir, TEST_VERSION_CURRENT, @@ -224,9 +224,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase { names.addAll(Arrays.asList(oldSingleSegmentNames)); oldIndexDirs = new HashMap(); for (String name : names) { - File dir = _TestUtil.getTempDir(name); + File dir = TestUtil.getTempDir(name); File dataFile = new File(TestBackwardsCompatibility.class.getResource("index." 
+ name + ".zip").toURI()); - _TestUtil.unzip(dataFile, dir); + TestUtil.unzip(dataFile, dir); oldIndexDirs.put(name, newFSDirectory(dir)); } } @@ -245,8 +245,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: index " + unsupportedNames[i]); } - File oldIndxeDir = _TestUtil.getTempDir(unsupportedNames[i]); - _TestUtil.unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), oldIndxeDir); + File oldIndxeDir = TestUtil.getTempDir(unsupportedNames[i]); + TestUtil.unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), oldIndxeDir); BaseDirectoryWrapper dir = newFSDirectory(oldIndxeDir); // don't checkindex, these are intentionally not supported dir.setCheckIndexOnClose(false); @@ -295,7 +295,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { assertTrue(bos.toString("UTF-8").contains(IndexFormatTooOldException.class.getName())); dir.close(); - _TestUtil.rmDir(oldIndxeDir); + TestUtil.rmDir(oldIndxeDir); } } @@ -388,7 +388,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = newSearcher(reader); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); // true if this is a 4.0+ index final boolean is40Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("content5") != null; @@ -594,7 +594,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { public File createIndex(String dirName, boolean doCFS, boolean fullyMerged) throws IOException { // we use a real directory name that is not cleaned up, because this method is only used to create backwards indexes: File indexDir = new File("/tmp/idx", dirName); - _TestUtil.rmDir(indexDir); + TestUtil.rmDir(indexDir); Directory dir = newFSDirectory(indexDir); LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy(); mp.setNoCFSRatio(doCFS ? 
1.0 : 0.0); @@ -642,8 +642,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase { public void testExactFileNames() throws IOException { String outputDirName = "lucene.backwardscompat0.index"; - File outputDir = _TestUtil.getTempDir(outputDirName); - _TestUtil.rmDir(outputDir); + File outputDir = TestUtil.getTempDir(outputDirName); + TestUtil.rmDir(outputDir); try { Directory dir = newFSDirectory(outputDir); @@ -701,7 +701,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { } dir.close(); } finally { - _TestUtil.rmDir(outputDir); + TestUtil.rmDir(outputDir); } } @@ -811,7 +811,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { // should be found exactly assertEquals(TermsEnum.SeekStatus.FOUND, terms.seekCeil(aaaTerm)); - assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE))); + assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE))); assertNull(terms.next()); // should hit end of field @@ -823,12 +823,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase { assertEquals(TermsEnum.SeekStatus.NOT_FOUND, terms.seekCeil(new BytesRef("a"))); assertTrue(terms.term().bytesEquals(aaaTerm)); - assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE))); + assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE))); assertNull(terms.next()); assertEquals(TermsEnum.SeekStatus.FOUND, terms.seekCeil(aaaTerm)); - assertEquals(35, countDocs(_TestUtil.docs(random(), terms,null, null, DocsEnum.FLAG_NONE))); + assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE))); assertNull(terms.next()); r.close(); @@ -952,9 +952,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase { public void testCommandLineArgs() throws Exception { for (String name : oldIndexDirs.keySet()) { - File dir = _TestUtil.getTempDir(name); + File dir = 
TestUtil.getTempDir(name); File dataFile = new File(TestBackwardsCompatibility.class.getResource("index." + name + ".zip").toURI()); - _TestUtil.unzip(dataFile, dir); + TestUtil.unzip(dataFile, dir); String path = dir.getAbsolutePath(); @@ -1045,11 +1045,11 @@ public class TestBackwardsCompatibility extends LuceneTestCase { public static final String moreTermsIndex = "moreterms.40.zip"; public void testMoreTerms() throws Exception { - File oldIndexDir = _TestUtil.getTempDir("moreterms"); - _TestUtil.unzip(getDataFile(moreTermsIndex), oldIndexDir); + File oldIndexDir = TestUtil.getTempDir("moreterms"); + TestUtil.unzip(getDataFile(moreTermsIndex), oldIndexDir); Directory dir = newFSDirectory(oldIndexDir); // TODO: more tests - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); dir.close(); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java index 54b25b5efc9..8b3c93be473 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java @@ -33,7 +33,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Simple test that adds numeric terms, where each term has the @@ -45,8 +45,8 @@ public class TestBagOfPositions extends LuceneTestCase { public void test() throws Exception { List postingsList = new ArrayList(); int numTerms = atLeast(300); - final int maxTermsPerDoc = _TestUtil.nextInt(random(), 10, 20); - boolean isSimpleText = "SimpleText".equals(_TestUtil.getPostingsFormat("field")); + final int maxTermsPerDoc = TestUtil.nextInt(random(), 10, 20); + boolean isSimpleText = "SimpleText".equals(TestUtil.getPostingsFormat("field")); 
IndexWriterConfig iwc = newIndexWriterConfig(random(), TEST_VERSION_CURRENT, new MockAnalyzer(random())); @@ -68,11 +68,11 @@ public class TestBagOfPositions extends LuceneTestCase { final ConcurrentLinkedQueue postings = new ConcurrentLinkedQueue(postingsList); - Directory dir = newFSDirectory(_TestUtil.getTempDir("bagofpositions")); + Directory dir = newFSDirectory(TestUtil.getTempDir("bagofpositions")); final RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); - int threadCount = _TestUtil.nextInt(random(), 1, 5); + int threadCount = TestUtil.nextInt(random(), 1, 5); if (VERBOSE) { System.out.println("config: " + iw.w.getConfig()); System.out.println("threadCount=" + threadCount); @@ -87,7 +87,7 @@ public class TestBagOfPositions extends LuceneTestCase { if (options == 0) { fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS); // we dont actually need positions fieldType.setStoreTermVectors(true); // but enforce term vectors when we do this so we check SOMETHING - } else if (options == 1 && !doesntSupportOffsets.contains(_TestUtil.getPostingsFormat("field"))) { + } else if (options == 1 && !doesntSupportOffsets.contains(TestUtil.getPostingsFormat("field"))) { fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); } // else just positions diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java index 28d058bb6a0..572069961bf 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java @@ -32,7 +32,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Simple test that adds 
numeric terms, where each term has the @@ -43,9 +43,9 @@ public class TestBagOfPostings extends LuceneTestCase { public void test() throws Exception { List postingsList = new ArrayList(); int numTerms = atLeast(300); - final int maxTermsPerDoc = _TestUtil.nextInt(random(), 10, 20); + final int maxTermsPerDoc = TestUtil.nextInt(random(), 10, 20); - boolean isSimpleText = "SimpleText".equals(_TestUtil.getPostingsFormat("field")); + boolean isSimpleText = "SimpleText".equals(TestUtil.getPostingsFormat("field")); IndexWriterConfig iwc = newIndexWriterConfig(random(), TEST_VERSION_CURRENT, new MockAnalyzer(random())); @@ -69,10 +69,10 @@ public class TestBagOfPostings extends LuceneTestCase { final ConcurrentLinkedQueue postings = new ConcurrentLinkedQueue(postingsList); - Directory dir = newFSDirectory(_TestUtil.getTempDir("bagofpostings")); + Directory dir = newFSDirectory(TestUtil.getTempDir("bagofpostings")); final RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); - int threadCount = _TestUtil.nextInt(random(), 1, 5); + int threadCount = TestUtil.nextInt(random(), 1, 5); if (VERBOSE) { System.out.println("config: " + iw.w.getConfig()); System.out.println("threadCount=" + threadCount); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java index 05b493b7d79..171b97fe836 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java @@ -23,7 +23,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestCodecHoldsOpenFiles extends LuceneTestCase { public void test() throws Exception { 
@@ -49,7 +49,7 @@ public class TestCodecHoldsOpenFiles extends LuceneTestCase { } for(AtomicReaderContext cxt : r.leaves()) { - _TestUtil.checkReader(cxt.reader()); + TestUtil.checkReader(cxt.reader()); } r.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java index 4ebb8578992..30a6bd942e0 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java @@ -49,7 +49,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.BeforeClass; // TODO: test multiple codecs here? @@ -187,7 +187,7 @@ public class TestCodecs extends LuceneTestCase { // Make term text String text2; while(true) { - text2 = _TestUtil.randomUnicodeString(random()); + text2 = TestUtil.randomUnicodeString(random()); if (!termsSeen.contains(text2) && !text2.endsWith(".")) { termsSeen.add(text2); break; @@ -205,7 +205,7 @@ public class TestCodecs extends LuceneTestCase { int docID = 0; for(int j=0;j terms = new HashSet(); while(terms.size() < NUM_TERMS) { - final String s = _TestUtil.randomRealisticUnicodeString(random()); + final String s = TestUtil.randomRealisticUnicodeString(random()); //final String s = _TestUtil.randomSimpleString(random); if (s.length() > 0) { terms.add(new BytesRef(s)); @@ -113,7 +113,7 @@ public class TestDocTermOrds extends LuceneTestCase { // Sometimes swap in codec that impls ord(): if (random().nextInt(10) == 7) { // Make sure terms index has ords: - Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds")); + Codec codec = TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds")); conf.setCodec(codec); } @@ -127,7 +127,7 @@ public class 
TestDocTermOrds extends LuceneTestCase { doc.add(new IntField("id", id, Field.Store.YES)); - final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER); + final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER); while(ordsForDocSet.size() < termCount) { ordsForDocSet.add(random().nextInt(termsArray.length)); } @@ -182,12 +182,12 @@ public class TestDocTermOrds extends LuceneTestCase { Directory dir = newDirectory(); final Set prefixes = new HashSet(); - final int numPrefix = _TestUtil.nextInt(random(), 2, 7); + final int numPrefix = TestUtil.nextInt(random(), 2, 7); if (VERBOSE) { System.out.println("TEST: use " + numPrefix + " prefixes"); } while(prefixes.size() < numPrefix) { - prefixes.add(_TestUtil.randomRealisticUnicodeString(random())); + prefixes.add(TestUtil.randomRealisticUnicodeString(random())); //prefixes.add(_TestUtil.randomSimpleString(random)); } final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]); @@ -195,7 +195,7 @@ public class TestDocTermOrds extends LuceneTestCase { final int NUM_TERMS = atLeast(20); final Set terms = new HashSet(); while(terms.size() < NUM_TERMS) { - final String s = prefixesArray[random().nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random()); + final String s = prefixesArray[random().nextInt(prefixesArray.length)] + TestUtil.randomRealisticUnicodeString(random()); //final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomSimpleString(random); if (s.length() > 0) { terms.add(new BytesRef(s)); @@ -210,7 +210,7 @@ public class TestDocTermOrds extends LuceneTestCase { // Sometimes swap in codec that impls ord(): if (random().nextInt(10) == 7) { - Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds")); + Codec codec = TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds")); conf.setCodec(codec); } @@ -224,7 +224,7 @@ public class TestDocTermOrds extends 
LuceneTestCase { doc.add(new IntField("id", id, Field.Store.YES)); - final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER); + final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER); while(ordsForDocSet.size() < termCount) { ordsForDocSet.add(random().nextInt(termsArray.length)); } @@ -303,7 +303,7 @@ public class TestDocTermOrds extends LuceneTestCase { "field", prefixRef, Integer.MAX_VALUE, - _TestUtil.nextInt(random(), 2, 10)); + TestUtil.nextInt(random(), 2, 10)); final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(r, "id", false); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesFormat.java index 6bc3554bc03..11d57640cb1 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesFormat.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesFormat.java @@ -18,7 +18,8 @@ package org.apache.lucene.index; */ import org.apache.lucene.codecs.Codec; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; /** Tests the codec configuration defined by LuceneTestCase randomly * (typically a mix across different fields). 
@@ -32,6 +33,6 @@ public class TestDocValuesFormat extends BaseDocValuesFormatTestCase { @Override protected boolean codecAcceptsHugeBinaryValues(String field) { - return _TestUtil.fieldSupportsHugeBinaryDocValues(field); + return TestUtil.fieldSupportsHugeBinaryDocValues(field); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java index 964fc518df1..0ba48979694 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java @@ -34,7 +34,7 @@ import org.apache.lucene.search.FieldCache; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestDocValuesWithThreads extends LuceneTestCase { @@ -50,10 +50,10 @@ public class TestDocValuesWithThreads extends LuceneTestCase { Document d = new Document(); long number = random().nextLong(); d.add(new NumericDocValuesField("number", number)); - BytesRef bytes = new BytesRef(_TestUtil.randomRealisticUnicodeString(random())); + BytesRef bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random())); d.add(new BinaryDocValuesField("bytes", bytes)); binary.add(bytes); - bytes = new BytesRef(_TestUtil.randomRealisticUnicodeString(random())); + bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random())); d.add(new SortedDocValuesField("sorted", bytes)); sorted.add(bytes); w.addDocument(d); @@ -67,7 +67,7 @@ public class TestDocValuesWithThreads extends LuceneTestCase { assertEquals(1, r.leaves().size()); final AtomicReader ar = r.leaves().get(0).reader(); - int numThreads = _TestUtil.nextInt(random(), 2, 5); + int numThreads = TestUtil.nextInt(random(), 2, 5); List threads = new 
ArrayList(); final CountDownLatch startingGun = new CountDownLatch(1); for(int t=0;t 1) { - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); } dir.close(); return true; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java index 4d7fcbd0691..a1e27bba00c 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java @@ -28,11 +28,11 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.PrintStreamInfoStream; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestIndexWriterOutOfFileDescriptors extends LuceneTestCase { public void test() throws Exception { - MockDirectoryWrapper dir = newMockFSDirectory(_TestUtil.getTempDir("TestIndexWriterOutOfFileDescriptors")); + MockDirectoryWrapper dir = newMockFSDirectory(TestUtil.getTempDir("TestIndexWriterOutOfFileDescriptors")); dir.setPreventDoubleWrite(false); double rate = random().nextDouble()*0.01; //System.out.println("rate=" + rate); @@ -129,7 +129,7 @@ public class TestIndexWriterOutOfFileDescriptors extends LuceneTestCase { // it to addIndexes later: dir.setRandomIOExceptionRateOnOpen(0.0); r = DirectoryReader.open(dir); - dirCopy = newMockFSDirectory(_TestUtil.getTempDir("TestIndexWriterOutOfFileDescriptors.copy")); + dirCopy = newMockFSDirectory(TestUtil.getTempDir("TestIndexWriterOutOfFileDescriptors.copy")); Set files = new HashSet(); for (String file : dir.listAll()) { dir.copy(dirCopy, file, file, IOContext.DEFAULT); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 
a4d1958f96f..5fedf2bd74d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -41,8 +41,8 @@ import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.ThreadInterruptedException; -import org.apache.lucene.util._TestUtil; import org.junit.Test; public class TestIndexWriterReader extends LuceneTestCase { @@ -51,11 +51,11 @@ public class TestIndexWriterReader extends LuceneTestCase { public static int count(Term t, IndexReader r) throws IOException { int count = 0; - DocsEnum td = _TestUtil.docs(random(), r, - t.field(), new BytesRef(t.text()), - MultiFields.getLiveDocs(r), - null, - 0); + DocsEnum td = TestUtil.docs(random(), r, + t.field(), new BytesRef(t.text()), + MultiFields.getLiveDocs(r), + null, + 0); if (td != null) { while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { @@ -370,7 +370,7 @@ public class TestIndexWriterReader extends LuceneTestCase { Directory mainDir = getAssertNoDeletesDirectory(newDirectory()); IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); - _TestUtil.reduceOpenFiles(mainWriter); + TestUtil.reduceOpenFiles(mainWriter); AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter); addDirThreads.launchThreads(numDirs); @@ -384,7 +384,7 @@ public class TestIndexWriterReader extends LuceneTestCase { assertTrue(addDirThreads.failures.size() == 0); - _TestUtil.checkIndex(mainDir); + TestUtil.checkIndex(mainDir); IndexReader reader = DirectoryReader.open(mainDir); assertEquals(addDirThreads.count.intValue(), reader.numDocs()); @@ -413,7 +413,7 @@ public class TestIndexWriterReader extends LuceneTestCase { 
this.mainWriter = mainWriter; addDir = newDirectory(); IndexWriter writer = new IndexWriter(addDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2)); - _TestUtil.reduceOpenFiles(writer); + TestUtil.reduceOpenFiles(writer); for (int i = 0; i < NUM_INIT_DOCS; i++) { Document doc = DocHelper.createDocument(i, "addindex", 4); writer.addDocument(doc); @@ -659,9 +659,9 @@ public class TestIndexWriterReader extends LuceneTestCase { // get a reader to put writer into near real-time mode DirectoryReader r1 = writer.getReader(); - _TestUtil.checkIndex(dir1); + TestUtil.checkIndex(dir1); writer.commit(); - _TestUtil.checkIndex(dir1); + TestUtil.checkIndex(dir1); assertEquals(100, r1.numDocs()); for (int i = 0; i < 10; i++) { @@ -691,7 +691,7 @@ public class TestIndexWriterReader extends LuceneTestCase { DirectoryReader r = writer.getReader(); writer.close(); - _TestUtil.checkIndex(dir1); + TestUtil.checkIndex(dir1); // reader should remain usable even after IndexWriter is closed: assertEquals(100, r.numDocs()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java index 540f1f3217a..4e40b4159fa 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java @@ -40,8 +40,8 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.ThreadInterruptedException; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.LuceneTestCase.Slow; /** @@ -228,12 +228,12 @@ public class TestIndexWriterWithThreads extends LuceneTestCase { // Quick test to make sure index is not corrupt: IndexReader reader = DirectoryReader.open(dir); 
- DocsEnum tdocs = _TestUtil.docs(random(), reader, - "field", - new BytesRef("aaa"), - MultiFields.getLiveDocs(reader), - null, - 0); + DocsEnum tdocs = TestUtil.docs(random(), reader, + "field", + new BytesRef("aaa"), + MultiFields.getLiveDocs(reader), + null, + 0); int count = 0; while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { count++; @@ -545,7 +545,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase { ((MockDirectoryWrapper)d).setPreventDoubleWrite(false); } - final int threadCount = _TestUtil.nextInt(random(), 2, 6); + final int threadCount = TestUtil.nextInt(random(), 2, 6); final AtomicReference writerRef = new AtomicReference(); writerRef.set(new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())))); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java index cd303f2523b..a83815fed9b 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java @@ -35,7 +35,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestIndexableField extends LuceneTestCase { @@ -172,7 +172,7 @@ public class TestIndexableField extends LuceneTestCase { int baseCount = 0; for(int docCount=0;docCount terms = new ArrayList(); - int maxCeiling = _TestUtil.nextInt(random(), 0, 255); + int maxCeiling = TestUtil.nextInt(random(), 0, 255); int max = 0; for (char ch = 'a'; ch <= 'z'; ch++) { - int num = _TestUtil.nextInt(random(), 0, maxCeiling); + int num = TestUtil.nextInt(random(), 0, maxCeiling); for (int i = 0; i < num; i++) terms.add(Character.toString(ch)); max = Math.max(max, num); diff --git 
a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java index c8844797a1c..9cb54b176ab 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java @@ -26,7 +26,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestMixedCodecs extends LuceneTestCase { @@ -56,7 +56,7 @@ public class TestMixedCodecs extends LuceneTestCase { w.close(); } w = new RandomIndexWriter(random(), dir, iwc); - docsLeftInThisSegment = _TestUtil.nextInt(random(), 10, 100); + docsLeftInThisSegment = TestUtil.nextInt(random(), 10, 100); } final Document doc = new Document(); doc.add(newStringField("id", String.valueOf(docUpto), Field.Store.YES)); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java index c36b049cffd..72d546f1afd 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java @@ -29,7 +29,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** Tests MultiDocValues versus ordinary segment merging */ public class TestMultiDocValues extends LuceneTestCase { @@ -81,7 +81,7 @@ public class TestMultiDocValues extends LuceneTestCase { int numDocs = atLeast(500); for (int i = 0; i < numDocs; i++) { - ref.copyChars(_TestUtil.randomUnicodeString(random())); + 
ref.copyChars(TestUtil.randomUnicodeString(random())); iw.addDocument(doc); if (random().nextInt(17) == 0) { iw.commit(); @@ -120,7 +121,7 @@ public class TestMultiDocValues extends LuceneTestCase { int numDocs = atLeast(500); for (int i = 0; i < numDocs; i++) { - ref.copyChars(_TestUtil.randomUnicodeString(random())); + ref.copyChars(TestUtil.randomUnicodeString(random())); if (defaultCodecSupportsDocsWithField() && random().nextInt(7) == 0) { iw.addDocument(new Document()); } @@ -167,7 +168,7 @@ public class TestMultiDocValues extends LuceneTestCase { int numDocs = atLeast(500); for (int i = 0; i < numDocs; i++) { - ref.copyChars(_TestUtil.randomSimpleString(random(), 2)); + ref.copyChars(TestUtil.randomSimpleString(random(), 2)); iw.addDocument(doc); if (random().nextInt(17) == 0) { iw.commit(); @@ -210,7 +211,7 @@ public class TestMultiDocValues extends LuceneTestCase { Document doc = new Document(); int numValues = random().nextInt(5); for (int j = 0; j < numValues; j++) { - doc.add(new SortedSetDocValuesField("bytes", new BytesRef(_TestUtil.randomUnicodeString(random())))); + doc.add(new SortedSetDocValuesField("bytes", new BytesRef(TestUtil.randomUnicodeString(random())))); } iw.addDocument(doc); if (random().nextInt(17) == 0) { @@ -275,7 +276,7 @@ public class TestMultiDocValues extends LuceneTestCase { Document doc = new Document(); int numValues = random().nextInt(5); for (int j = 0; j < numValues; j++) { - doc.add(new SortedSetDocValuesField("bytes", new BytesRef(_TestUtil.randomSimpleString(random(), 2)))); + doc.add(new SortedSetDocValuesField("bytes", new BytesRef(TestUtil.randomSimpleString(random(), 2)))); } iw.addDocument(doc); if (random().nextInt(17) == 0) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java index 1f51ecb605a..6c162fe2374 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java +++ 
b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java @@ -44,7 +44,7 @@ public class TestMultiFields extends LuceneTestCase { Set deleted = new HashSet(); List terms = new ArrayList(); - int numDocs = _TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER); + int numDocs = TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER); Document doc = new Document(); Field f = newStringField("field", "", Field.Store.NO); doc.add(f); @@ -64,7 +64,7 @@ public class TestMultiFields extends LuceneTestCase { docs.get(term).add(i); f.setStringValue(term.utf8ToString()); } else { - String s = _TestUtil.randomUnicodeString(random(), 10); + String s = TestUtil.randomUnicodeString(random(), 10); BytesRef term = new BytesRef(s); if (!docs.containsKey(term)) { docs.put(term, new ArrayList()); @@ -122,7 +122,7 @@ public class TestMultiFields extends LuceneTestCase { System.out.println("TEST: seek term="+ UnicodeUtil.toHexString(term.utf8ToString()) + " " + term); } - DocsEnum docsEnum = _TestUtil.docs(random(), reader, "field", term, liveDocs, null, DocsEnum.FLAG_NONE); + DocsEnum docsEnum = TestUtil.docs(random(), reader, "field", term, liveDocs, null, DocsEnum.FLAG_NONE); assertNotNull(docsEnum); for(int docID : docs.get(term)) { @@ -163,8 +163,8 @@ public class TestMultiFields extends LuceneTestCase { w.addDocument(d); IndexReader r = w.getReader(); w.close(); - DocsEnum d1 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE); - DocsEnum d2 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE); + DocsEnum d1 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE); + DocsEnum d2 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE); assertEquals(0, d1.nextDoc()); assertEquals(0, d2.nextDoc()); r.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java 
b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 63f75f3583a..22c285fabdf 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; */ import java.io.IOException; -import java.io.Reader; import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.analysis.*; @@ -33,7 +32,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Before; /** @@ -69,7 +68,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { public void testSimpleSkip() throws IOException { Directory dir = new CountingRAMDirectory(new RAMDirectory()); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new PayloadAnalyzer()).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())).setMergePolicy(newLogMergePolicy())); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new PayloadAnalyzer()).setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())).setMergePolicy(newLogMergePolicy())); Term term = new Term("test", "a"); for (int i = 0; i < 5000; i++) { Document d1 = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java index 8e9dec3c33d..9f45e21afb6 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java @@ -27,7 +27,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper; 
import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; // Make sure if you use NoDeletionPolicy that no file // referenced by a commit point is ever deleted @@ -35,7 +35,7 @@ import org.apache.lucene.util._TestUtil; public class TestNeverDelete extends LuceneTestCase { public void testIndexing() throws Exception { - final File tmpDir = _TestUtil.getTempDir("TestNeverDelete"); + final File tmpDir = TestUtil.getTempDir("TestNeverDelete"); final BaseDirectoryWrapper d = newFSDirectory(tmpDir); // We want to "see" files removed if Lucene removed @@ -49,7 +49,7 @@ public class TestNeverDelete extends LuceneTestCase { newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); - w.w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random(), 5, 30)); + w.w.getConfig().setMaxBufferedDocs(TestUtil.nextInt(random(), 5, 30)); w.commit(); Thread[] indexThreads = new Thread[random().nextInt(4)]; @@ -108,6 +108,6 @@ public class TestNeverDelete extends LuceneTestCase { w.close(); d.close(); - _TestUtil.rmDir(tmpDir); + TestUtil.rmDir(tmpDir); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java index bb91737066c..7a0cdb2faef 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java @@ -36,7 +36,7 @@ import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Test that norms info is preserved during index life - including @@ -107,7 +107,7 @@ public class TestNorms extends LuceneTestCase { } public void testMaxByteNorms() throws IOException { - Directory 
dir = newFSDirectory(_TestUtil.getTempDir("TestNorms.testMaxByteNorms")); + Directory dir = newFSDirectory(TestUtil.getTempDir("TestNorms.testMaxByteNorms")); buildIndex(dir); AtomicReader open = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir)); NumericDocValues normValues = open.getNormValues(byteTestField); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java index 69a74586a29..4d81ba5afb4 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java @@ -32,7 +32,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; import com.carrotsearch.randomizedtesting.generators.RandomPicks; @@ -981,7 +981,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase { final IndexWriter writer = new IndexWriter(dir, conf); // create index - final int numThreads = _TestUtil.nextInt(random(), 3, 6); + final int numThreads = TestUtil.nextInt(random(), 3, 6); final int numDocs = atLeast(2000); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); @@ -1200,10 +1200,10 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase { IndexWriter writer = new IndexWriter(dir1, conf); final int numDocs = atLeast(50); - final int numTerms = _TestUtil.nextInt(random(), 1, numDocs / 5); + final int numTerms = TestUtil.nextInt(random(), 1, numDocs / 5); Set randomTerms = new HashSet(); while (randomTerms.size() < numTerms) { - randomTerms.add(_TestUtil.randomSimpleString(random())); + randomTerms.add(TestUtil.randomSimpleString(random())); } // 
create first index @@ -1298,10 +1299,10 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase { // test data: lots of documents (few 10Ks) and lots of update terms (few hundreds) final int numDocs = atLeast(20000); final int numNumericFields = atLeast(5); - final int numTerms = _TestUtil.nextInt(random, 10, 100); // terms should affect many docs + final int numTerms = TestUtil.nextInt(random, 10, 100); // terms should affect many docs Set updateTerms = new HashSet(); while (updateTerms.size() < numTerms) { - updateTerms.add(_TestUtil.randomSimpleString(random)); + updateTerms.add(TestUtil.randomSimpleString(random)); } // System.out.println("numDocs=" + numDocs + " numNumericFields=" + numNumericFields + " numTerms=" + numTerms); @@ -1309,7 +1310,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase { // build a large index with many NDV fields and update terms for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - int numUpdateTerms = _TestUtil.nextInt(random, 1, numTerms / 10); + int numUpdateTerms = TestUtil.nextInt(random, 1, numTerms / 10); for (int j = 0; j < numUpdateTerms; j++) { doc.add(new StringField("upd", RandomPicks.randomFrom(random, updateTerms), Store.NO)); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java index c400fc556de..5be0818aa85 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java @@ -27,7 +27,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestOmitNorms extends LuceneTestCase { // Tests whether the DocumentWriter correctly enable the @@ -281,7 +281,7 @@ public class TestOmitNorms 
extends LuceneTestCase { riw.addDocument(d); // add a mix of f1's and f2's - int numExtraDocs = _TestUtil.nextInt(random(), 1, 1000); + int numExtraDocs = TestUtil.nextInt(random(), 1, 1000); for (int i = 0; i < numExtraDocs; i++) { d = new Document(); d.add(random().nextBoolean() ? f1 : f2); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java index aeee6e60df2..004b3515c14 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * @@ -53,7 +53,7 @@ public class TestOmitPositions extends LuceneTestCase { assertNull(MultiFields.getTermPositionsEnum(reader, null, "foo", new BytesRef("test"))); - DocsEnum de = _TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, DocsEnum.FLAG_FREQS); + DocsEnum de = TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, DocsEnum.FLAG_FREQS); while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { assertEquals(2, de.freq()); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java index c1ff0bc4a1a..835f36f30e9 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.*; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import 
org.apache.lucene.util.TestUtil; public class TestParallelAtomicReader extends LuceneTestCase { @@ -287,7 +287,7 @@ public class TestParallelAtomicReader extends LuceneTestCase { ParallelAtomicReader pr = new ParallelAtomicReader( SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)), SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2))); - _TestUtil.checkReader(pr); + TestUtil.checkReader(pr); return newSearcher(pr); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java index 9d47186d5c4..48496432c10 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java @@ -28,7 +28,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestParallelTermEnum extends LuceneTestCase { private AtomicReader ir1; @@ -82,7 +82,7 @@ public class TestParallelTermEnum extends LuceneTestCase { BytesRef b = te.next(); assertNotNull(b); assertEquals(t, b.utf8ToString()); - DocsEnum td = _TestUtil.docs(random(), te, liveDocs, null, DocsEnum.FLAG_NONE); + DocsEnum td = TestUtil.docs(random(), te, liveDocs, null, DocsEnum.FLAG_NONE); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocIdSetIterator.NO_MORE_DOCS); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java index db24d51f011..dda5ab3f5fd 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; */ import java.io.IOException; 
-import java.io.Reader; import java.io.StringReader; import java.nio.charset.Charset; import java.util.ArrayList; @@ -38,7 +37,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestPayloads extends LuceneTestCase { @@ -301,7 +300,7 @@ public class TestPayloads extends LuceneTestCase { static final Charset utf8 = Charset.forName("UTF-8"); private void generateRandomData(byte[] data) { // this test needs the random data to be valid unicode - String s = _TestUtil.randomFixedByteLengthUnicodeString(random(), data.length); + String s = TestUtil.randomFixedByteLengthUnicodeString(random(), data.length); byte b[] = s.getBytes(utf8); assert b.length == data.length; System.arraycopy(b, 0, data, 0, b.length); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java index 4ff5db3f00e..c5045428c32 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.index.MergePolicy.MergeTrigger; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; @@ -33,7 +32,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestPerSegmentDeletes extends LuceneTestCase { public void testDeletes1() throws Exception { @@ -228,7 +227,7 @@ public class 
TestPerSegmentDeletes extends LuceneTestCase { Terms cterms = fields.terms(term.field); TermsEnum ctermsEnum = cterms.iterator(null); if (ctermsEnum.seekExact(new BytesRef(term.text()))) { - DocsEnum docsEnum = _TestUtil.docs(random(), ctermsEnum, bits, null, DocsEnum.FLAG_NONE); + DocsEnum docsEnum = TestUtil.docs(random(), ctermsEnum, bits, null, DocsEnum.FLAG_NONE); return toArray(docsEnum); } return null; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java index b7456251db4..c460af4f579 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java @@ -44,7 +44,7 @@ import org.apache.lucene.util.English; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; // TODO: we really need to test indexingoffsets, but then getting only docs / docs + freqs. // not all codecs store prx separate... 
@@ -183,7 +183,7 @@ public class TestPostingsOffsets extends LuceneTestCase { int numSkippingTests = atLeast(50); for (int j = 0; j < numSkippingTests; j++) { - int num = _TestUtil.nextInt(random(), 100, Math.min(numDocs-1, 999)); + int num = TestUtil.nextInt(random(), 100, Math.min(numDocs - 1, 999)); DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred")); int doc = dp.advance(num); assertEquals(num, doc); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java b/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java index 3f55f07b9dc..11d2c2313b6 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; */ import java.util.ArrayList; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -26,7 +25,7 @@ import java.util.TreeSet; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.MergedIterator; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestPrefixCodedTerms extends LuceneTestCase { @@ -50,7 +49,7 @@ public class TestPrefixCodedTerms extends LuceneTestCase { Set terms = new TreeSet(); int nterms = atLeast(10000); for (int i = 0; i < nterms; i++) { - Term term = new Term(_TestUtil.randomUnicodeString(random(), 2), _TestUtil.randomUnicodeString(random())); + Term term = new Term(TestUtil.randomUnicodeString(random(), 2), TestUtil.randomUnicodeString(random())); terms.add(term); } @@ -89,14 +88,14 @@ public class TestPrefixCodedTerms extends LuceneTestCase { @SuppressWarnings({"unchecked","rawtypes"}) public void testMergeRandom() { - PrefixCodedTerms pb[] = new PrefixCodedTerms[_TestUtil.nextInt(random(), 2, 10)]; + PrefixCodedTerms pb[] = new PrefixCodedTerms[TestUtil.nextInt(random(), 2, 
10)]; Set superSet = new TreeSet(); for (int i = 0; i < pb.length; i++) { Set terms = new TreeSet(); - int nterms = _TestUtil.nextInt(random(), 0, 10000); + int nterms = TestUtil.nextInt(random(), 0, 10000); for (int j = 0; j < nterms; j++) { - Term term = new Term(_TestUtil.randomUnicodeString(random(), 2), _TestUtil.randomUnicodeString(random(), 4)); + Term term = new Term(TestUtil.randomUnicodeString(random(), 2), TestUtil.randomUnicodeString(random(), 4)); terms.add(term); } superSet.addAll(terms); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java b/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java index 7e38d4bf41c..b929b7cea32 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestReaderClosed extends LuceneTestCase { private IndexReader reader; @@ -38,7 +38,7 @@ public class TestReaderClosed extends LuceneTestCase { dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); Field field = newStringField("field", "", Field.Store.NO); @@ -48,7 +48,7 @@ public class TestReaderClosed extends LuceneTestCase { // but for preflex codec, the test can be very slow, so use less iterations. 
int num = atLeast(10); for (int i = 0; i < num; i++) { - field.setStringValue(_TestUtil.randomUnicodeString(random(), 10)); + field.setStringValue(TestUtil.randomUnicodeString(random(), 10)); writer.addDocument(doc); } reader = writer.getReader(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java index fb493f04975..008dd854ef8 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java @@ -43,7 +43,7 @@ public class TestRollingUpdates extends LuceneTestCase { //provider.register(new MemoryCodec()); if (random().nextBoolean()) { - Codec.setDefault(_TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat(random().nextBoolean(), random.nextFloat()))); + Codec.setDefault(TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat(random().nextBoolean(), random.nextFloat()))); } final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); @@ -160,7 +160,7 @@ public class TestRollingUpdates extends LuceneTestCase { final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2)); final int numUpdates = atLeast(20); - int numThreads = _TestUtil.nextInt(random(), 2, 6); + int numThreads = TestUtil.nextInt(random(), 2, 6); IndexingThread[] threads = new IndexingThread[numThreads]; for (int i = 0; i < numThreads; i++) { threads[i] = new IndexingThread(docs, w, numUpdates); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java index 0f8391b636e..e7e4fcfad37 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java @@ -29,7 +29,7 @@ import
org.apache.lucene.util.Constants; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestSegmentMerger extends LuceneTestCase { //The variables for the new merged segment @@ -102,12 +102,12 @@ public class TestSegmentMerger extends LuceneTestCase { assertTrue(newDoc2 != null); assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); - DocsEnum termDocs = _TestUtil.docs(random(), mergedReader, - DocHelper.TEXT_FIELD_2_KEY, - new BytesRef("field"), - MultiFields.getLiveDocs(mergedReader), - null, - 0); + DocsEnum termDocs = TestUtil.docs(random(), mergedReader, + DocHelper.TEXT_FIELD_2_KEY, + new BytesRef("field"), + MultiFields.getLiveDocs(mergedReader), + null, + 0); assertTrue(termDocs != null); assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); @@ -153,8 +153,8 @@ public class TestSegmentMerger extends LuceneTestCase { } public void testBuildDocMap() { - final int maxDoc = _TestUtil.nextInt(random(), 1, 128); - final int numDocs = _TestUtil.nextInt(random(), 0, maxDoc); + final int maxDoc = TestUtil.nextInt(random(), 1, 128); + final int numDocs = TestUtil.nextInt(random(), 0, maxDoc); final int numDeletedDocs = maxDoc - numDocs; final FixedBitSet liveDocs = new FixedBitSet(maxDoc); for (int i = 0; i < numDocs; ++i) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java index 8039094a631..11e1ab10c45 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java @@ -28,8 +28,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.util.BytesRef; import
org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; -import org.junit.Assume; +import org.apache.lucene.util.TestUtil; public class TestSegmentReader extends LuceneTestCase { private Directory dir; @@ -128,20 +127,20 @@ public class TestSegmentReader extends LuceneTestCase { } } - DocsEnum termDocs = _TestUtil.docs(random(), reader, - DocHelper.TEXT_FIELD_1_KEY, - new BytesRef("field"), - MultiFields.getLiveDocs(reader), - null, - 0); + DocsEnum termDocs = TestUtil.docs(random(), reader, + DocHelper.TEXT_FIELD_1_KEY, + new BytesRef("field"), + MultiFields.getLiveDocs(reader), + null, + 0); assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); - termDocs = _TestUtil.docs(random(), reader, - DocHelper.NO_NORMS_KEY, - new BytesRef(DocHelper.NO_NORMS_TEXT), - MultiFields.getLiveDocs(reader), - null, - 0); + termDocs = TestUtil.docs(random(), reader, + DocHelper.NO_NORMS_KEY, + new BytesRef(DocHelper.NO_NORMS_TEXT), + MultiFields.getLiveDocs(reader), + null, + 0); assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java index bbc8a22cc47..0843d875749 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestSegmentTermDocs extends LuceneTestCase { private Document testDoc = new Document(); @@ -58,7 +58,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null); terms.seekCeil(new 
BytesRef("field")); - DocsEnum termDocs = _TestUtil.docs(random(), terms, reader.getLiveDocs(), null, DocsEnum.FLAG_FREQS); + DocsEnum termDocs = TestUtil.docs(random(), terms, reader.getLiveDocs(), null, DocsEnum.FLAG_FREQS); if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int docId = termDocs.docID(); assertTrue(docId == 0); @@ -73,12 +73,12 @@ public class TestSegmentTermDocs extends LuceneTestCase { //After adding the document, we should be able to read it back in SegmentReader reader = new SegmentReader(info, newIOContext(random())); assertTrue(reader != null); - DocsEnum termDocs = _TestUtil.docs(random(), reader, - "textField2", - new BytesRef("bad"), - reader.getLiveDocs(), - null, - 0); + DocsEnum termDocs = TestUtil.docs(random(), reader, + "textField2", + new BytesRef("bad"), + reader.getLiveDocs(), + null, + 0); assertNull(termDocs); reader.close(); @@ -87,12 +87,12 @@ public class TestSegmentTermDocs extends LuceneTestCase { //After adding the document, we should be able to read it back in SegmentReader reader = new SegmentReader(info, newIOContext(random())); assertTrue(reader != null); - DocsEnum termDocs = _TestUtil.docs(random(), reader, - "junk", - new BytesRef("bad"), - reader.getLiveDocs(), - null, - 0); + DocsEnum termDocs = TestUtil.docs(random(), reader, + "junk", + new BytesRef("bad"), + reader.getLiveDocs(), + null, + 0); assertNull(termDocs); reader.close(); } @@ -120,12 +120,12 @@ public class TestSegmentTermDocs extends LuceneTestCase { IndexReader reader = DirectoryReader.open(dir); - DocsEnum tdocs = _TestUtil.docs(random(), reader, - ta.field(), - new BytesRef(ta.text()), - MultiFields.getLiveDocs(reader), - null, - DocsEnum.FLAG_FREQS); + DocsEnum tdocs = TestUtil.docs(random(), reader, + ta.field(), + new BytesRef(ta.text()), + MultiFields.getLiveDocs(reader), + null, + DocsEnum.FLAG_FREQS); // without optimization (assumption skipInterval == 16) @@ -145,12 +145,12 @@ public class TestSegmentTermDocs extends 
LuceneTestCase { assertFalse(tdocs.advance(10) != DocIdSetIterator.NO_MORE_DOCS); // without next - tdocs = _TestUtil.docs(random(), reader, - ta.field(), - new BytesRef(ta.text()), - MultiFields.getLiveDocs(reader), - null, - 0); + tdocs = TestUtil.docs(random(), reader, + ta.field(), + new BytesRef(ta.text()), + MultiFields.getLiveDocs(reader), + null, + 0); assertTrue(tdocs.advance(0) != DocIdSetIterator.NO_MORE_DOCS); assertEquals(0, tdocs.docID()); @@ -163,12 +163,12 @@ public class TestSegmentTermDocs extends LuceneTestCase { // exactly skipInterval documents and therefore with optimization // with next - tdocs = _TestUtil.docs(random(), reader, - tb.field(), - new BytesRef(tb.text()), - MultiFields.getLiveDocs(reader), - null, - DocsEnum.FLAG_FREQS); + tdocs = TestUtil.docs(random(), reader, + tb.field(), + new BytesRef(tb.text()), + MultiFields.getLiveDocs(reader), + null, + DocsEnum.FLAG_FREQS); assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(10, tdocs.docID()); @@ -187,12 +187,12 @@ public class TestSegmentTermDocs extends LuceneTestCase { assertFalse(tdocs.advance(26) != DocIdSetIterator.NO_MORE_DOCS); // without next - tdocs = _TestUtil.docs(random(), reader, - tb.field(), - new BytesRef(tb.text()), - MultiFields.getLiveDocs(reader), - null, - DocsEnum.FLAG_FREQS); + tdocs = TestUtil.docs(random(), reader, + tb.field(), + new BytesRef(tb.text()), + MultiFields.getLiveDocs(reader), + null, + DocsEnum.FLAG_FREQS); assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS); assertEquals(10, tdocs.docID()); @@ -207,12 +207,12 @@ public class TestSegmentTermDocs extends LuceneTestCase { // much more than skipInterval documents and therefore with optimization // with next - tdocs = _TestUtil.docs(random(), reader, - tc.field(), - new BytesRef(tc.text()), - MultiFields.getLiveDocs(reader), - null, - DocsEnum.FLAG_FREQS); + tdocs = TestUtil.docs(random(), reader, + tc.field(), + new BytesRef(tc.text()), + 
MultiFields.getLiveDocs(reader), + null, + DocsEnum.FLAG_FREQS); assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(26, tdocs.docID()); @@ -233,12 +233,12 @@ public class TestSegmentTermDocs extends LuceneTestCase { assertFalse(tdocs.advance(76) != DocIdSetIterator.NO_MORE_DOCS); //without next - tdocs = _TestUtil.docs(random(), reader, - tc.field(), - new BytesRef(tc.text()), - MultiFields.getLiveDocs(reader), - null, - 0); + tdocs = TestUtil.docs(random(), reader, + tc.field(), + new BytesRef(tc.text()), + MultiFields.getLiveDocs(reader), + null, + 0); assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS); assertEquals(26, tdocs.docID()); assertTrue(tdocs.advance(40) != DocIdSetIterator.NO_MORE_DOCS); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java index 1379a3c223e..b2796ea6d06 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java @@ -22,7 +22,7 @@ import java.io.IOException; import org.apache.lucene.document.Field; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat; import org.apache.lucene.document.Document; @@ -75,7 +75,7 @@ public class TestSegmentTermEnum extends LuceneTestCase { public void testPrevTermAtEnd() throws IOException { - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(TestUtil.alwaysPostingsFormat(new 
Lucene41PostingsFormat()))); addDoc(writer, "aaa bbb"); writer.close(); SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(dir)); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java index 3a1335259a9..c24b171871c 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java @@ -82,11 +82,11 @@ public class TestStressAdvance extends LuceneTestCase { System.out.println("\nTEST: iter=" + iter + " iter2=" + iter2); } assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("a"))); - de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); + de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); testOne(de, aDocIDs); assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("b"))); - de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); + de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE); testOne(de, bDocIDs); } @@ -115,7 +115,7 @@ public class TestStressAdvance extends LuceneTestCase { docID = docs.nextDoc(); } else { // test advance() - final int inc = _TestUtil.nextInt(random(), 1, expected.size()-1-upto); + final int inc = TestUtil.nextInt(random(), 1, expected.size() - 1 - upto); if (VERBOSE) { System.out.println(" do advance inc=" + inc); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java index 9ce0b06dcf7..9c154cb5c0a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java @@ -187,7 +187,7 @@ public class TestStressIndexing2 extends LuceneTestCase { } } - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); DocsAndWriter dw = new DocsAndWriter(); dw.docs = docs; dw.writer = w; @@ -233,7 +233,7 @@ 
public class TestStressIndexing2 extends LuceneTestCase { } //System.out.println("TEST: checkindex"); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); return docs; } @@ -333,7 +333,7 @@ public class TestStressIndexing2 extends LuceneTestCase { Bits liveDocs = MultiFields.getLiveDocs(r1); DocsEnum docs = null; while(termsEnum.next() != null) { - docs = _TestUtil.docs(random(), termsEnum, liveDocs, docs, DocsEnum.FLAG_NONE); + docs = TestUtil.docs(random(), termsEnum, liveDocs, docs, DocsEnum.FLAG_NONE); while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { fail("r1 is not empty but r2 is"); } @@ -353,9 +353,9 @@ public class TestStressIndexing2 extends LuceneTestCase { break; } - termDocs1 = _TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, DocsEnum.FLAG_NONE); + termDocs1 = TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, DocsEnum.FLAG_NONE); if (termsEnum2.seekExact(term)) { - termDocs2 = _TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, DocsEnum.FLAG_NONE); + termDocs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, DocsEnum.FLAG_NONE); } else { termDocs2 = null; } @@ -412,7 +412,7 @@ public class TestStressIndexing2 extends LuceneTestCase { System.out.println(" pos=" + dpEnum.nextPosition()); } } else { - dEnum = _TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS); + dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS); assertNotNull(dEnum); assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); final int freq = dEnum.freq(); @@ -444,7 +444,7 @@ public class TestStressIndexing2 extends LuceneTestCase { System.out.println(" pos=" + dpEnum.nextPosition()); } } else { - dEnum = _TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS); + dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS); assertNotNull(dEnum); assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); final int freq = dEnum.freq(); @@ -503,7 +503,7 @@ public 
class TestStressIndexing2 extends LuceneTestCase { } //System.out.println("TEST: term1=" + term1); - docs1 = _TestUtil.docs(random(), termsEnum1, liveDocs1, docs1, DocsEnum.FLAG_FREQS); + docs1 = TestUtil.docs(random(), termsEnum1, liveDocs1, docs1, DocsEnum.FLAG_FREQS); while (docs1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int d = docs1.docID(); int f = docs1.freq(); @@ -536,7 +536,7 @@ public class TestStressIndexing2 extends LuceneTestCase { } //System.out.println("TEST: term1=" + term1); - docs2 = _TestUtil.docs(random(), termsEnum2, liveDocs2, docs2, DocsEnum.FLAG_FREQS); + docs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, docs2, DocsEnum.FLAG_FREQS); while (docs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int d = r2r1[docs2.docID()]; int f = docs2.freq(); @@ -662,8 +662,8 @@ public class TestStressIndexing2 extends LuceneTestCase { assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum1.nextDoc()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum2.nextDoc()); } else { - dEnum1 = _TestUtil.docs(random(), termsEnum1, null, dEnum1, DocsEnum.FLAG_FREQS); - dEnum2 = _TestUtil.docs(random(), termsEnum2, null, dEnum2, DocsEnum.FLAG_FREQS); + dEnum1 = TestUtil.docs(random(), termsEnum1, null, dEnum1, DocsEnum.FLAG_FREQS); + dEnum2 = TestUtil.docs(random(), termsEnum2, null, dEnum2, DocsEnum.FLAG_FREQS); assertNotNull(dEnum1); assertNotNull(dEnum2); int docID1 = dEnum1.nextDoc(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java index 1af5a43f85d..fa3b909eb14 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java @@ -37,7 +37,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import 
org.apache.lucene.util.TestUtil; public class TestStressNRT extends LuceneTestCase { volatile DirectoryReader reader; @@ -71,15 +71,15 @@ public class TestStressNRT extends LuceneTestCase { final int deletePercent = random().nextInt(50); final int deleteByQueryPercent = random().nextInt(25); final int ndocs = atLeast(50); - final int nWriteThreads = _TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5); - final int maxConcurrentCommits = _TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5); // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max + final int nWriteThreads = TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5); + final int maxConcurrentCommits = TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5); // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max final boolean tombstones = random().nextBoolean(); // query variables final AtomicLong operations = new AtomicLong(atLeast(10000)); // number of query operations to perform in total - final int nReadThreads = _TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5); + final int nReadThreads = TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 
10 : 5); initModel(ndocs); final FieldType storedOnlyType = new FieldType(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java b/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java index 119e4ff2b4d..223fa708df5 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java @@ -21,7 +21,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests {@link Terms#getSumDocFreq()} @@ -44,11 +44,11 @@ public class TestSumDocFreq extends LuceneTestCase { doc.add(field2); for (int i = 0; i < numDocs; i++) { id.setStringValue("" + i); - char ch1 = (char) _TestUtil.nextInt(random(), 'a', 'z'); - char ch2 = (char) _TestUtil.nextInt(random(), 'a', 'z'); + char ch1 = (char) TestUtil.nextInt(random(), 'a', 'z'); + char ch2 = (char) TestUtil.nextInt(random(), 'a', 'z'); field1.setStringValue("" + ch1 + " " + ch2); - ch1 = (char) _TestUtil.nextInt(random(), 'a', 'z'); - ch2 = (char) _TestUtil.nextInt(random(), 'a', 'z'); + ch1 = (char) TestUtil.nextInt(random(), 'a', 'z'); + ch2 = (char) TestUtil.nextInt(random(), 'a', 'z'); field2.setStringValue("" + ch1 + " " + ch2); writer.addDocument(doc); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java index 6a281d3b7a1..6aa4512faf2 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; */ import java.io.IOException; -import java.io.Reader; import java.util.Arrays; import org.apache.lucene.analysis.*; @@ -35,7 +34,7 @@ import 
org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestTermVectorsReader extends LuceneTestCase { //Must be lexicographically sorted, will do in setup, versus trying to maintain here @@ -226,7 +225,7 @@ public class TestTermVectorsReader extends LuceneTestCase { //System.out.println("Term: " + term); assertEquals(testTerms[i], term); - docsEnum = _TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE); + docsEnum = TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE); assertNotNull(docsEnum); int doc = docsEnum.docID(); assertEquals(-1, doc); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java index 7b180c8bf41..48245af556a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; import java.io.IOException; -import java.io.Reader; import java.util.Random; import org.apache.lucene.analysis.Analyzer; @@ -31,7 +30,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; class RepeatingTokenizer extends Tokenizer { @@ -124,7 +123,7 @@ public class TestTermdocPerf extends LuceneTestCase { final Random random = new Random(random().nextLong()); for (int i=0; i seen = new HashSet(); final boolean allowEmptyString = random().nextBoolean(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java index 
813771ad4f8..fb1356e6d3f 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java @@ -34,7 +34,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.*; public class TestTermsEnum2 extends LuceneTestCase { @@ -53,7 +53,7 @@ public class TestTermsEnum2 extends LuceneTestCase { RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); Field field = newStringField("field", "", Field.Store.YES); doc.add(field); @@ -61,7 +61,7 @@ public class TestTermsEnum2 extends LuceneTestCase { int num = atLeast(200); for (int i = 0; i < num; i++) { - String s = _TestUtil.randomUnicodeString(random()); + String s = TestUtil.randomUnicodeString(random()); field.setStringValue(s); terms.add(new BytesRef(s)); writer.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java index 4f1e7ae78bf..155e8fea114 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java @@ -22,7 +22,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil;
public class TestTieredMergePolicy extends LuceneTestCase { @@ -81,7 +81,7 @@ public class TestTieredMergePolicy extends LuceneTestCase { IndexWriter w = new IndexWriter(dir, conf); int maxCount = 0; - final int numDocs = _TestUtil.nextInt(random(), 20, 100); + final int numDocs = TestUtil.nextInt(random(), 20, 100); for(int i=0;i terms = new HashSet(); - int num = _TestUtil.nextInt(random(), 0, 255); + int num = TestUtil.nextInt(random(), 0, 255); for (int i = 0; i < num; i++) { sb.append(' '); - char term = (char) _TestUtil.nextInt(random(), 'a', 'z'); + char term = (char) TestUtil.nextInt(random(), 'a', 'z'); sb.append(term); terms.add("" + term); } diff --git a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java index 861e541d02a..846b9438a17 100644 --- a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java @@ -33,7 +33,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -136,8 +136,8 @@ public class BaseTestRangeFilter extends LuceneTestCase { RandomIndexWriter writer = new RandomIndexWriter(random, index.index, newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy())); - _TestUtil.reduceOpenFiles(writer.w); + .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy())); + TestUtil.reduceOpenFiles(writer.w); while(true) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java b/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java index 8c0684a2a04..02f45e92d87 100644 --- a/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java +++ b/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; import java.io.IOException; -import java.io.Reader; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -31,7 +30,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; @@ -85,7 +84,7 @@ public class FuzzyTermOnShortTermsTest extends LuceneTestCase { Directory directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 100, 1000)).setMergePolicy(newLogMergePolicy())); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000)).setMergePolicy(newLogMergePolicy())); for (String s : vals){ Document d = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java index 859bbeb417e..531db4e5a5d 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java @@ -34,7 +34,7 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -92,7 +92,7 @@ public class TestBoolean2 extends LuceneTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), dir2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); doc.add(newTextField("field2", "xxx", Field.Store.NO)); for(int i=0;i numTerms) { terms.remove(random().nextInt(terms.size())); } @@ -269,7 +270,7 @@ public class TestBooleanQuery extends LuceneTestCase { nextDoc = scorer.nextDoc(); } else { // advance - int inc = _TestUtil.nextInt(random(), 1, left-1); + int inc = TestUtil.nextInt(random(), 1, left - 1); nextUpto = inc + upto; nextDoc = scorer.advance(hits.get(nextUpto).doc); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java index e60808b5ee9..f644a42f2b2 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java @@ -35,7 +35,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestCachingWrapperFilter extends LuceneTestCase { Directory dir; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java index 89d2b70ee81..fc11cb06ce3 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java @@
-33,8 +33,8 @@ import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.UnicodeUtil; -import org.apache.lucene.util._TestUtil; /** * Tests the DocTermOrdsRangeFilter @@ -53,7 +53,7 @@ public class TestDocTermOrdsRangeFilter extends LuceneTestCase { fieldName = random().nextBoolean() ? "field" : ""; // sometimes use an empty string as field name RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); List terms = new ArrayList(); int num = atLeast(200); for (int i = 0; i < num; i++) { @@ -61,7 +61,7 @@ public class TestDocTermOrdsRangeFilter extends LuceneTestCase { doc.add(newStringField("id", Integer.toString(i), Field.Store.NO)); int numTerms = random().nextInt(4); for (int j = 0; j < numTerms; j++) { - String s = _TestUtil.randomUnicodeString(random()); + String s = TestUtil.randomUnicodeString(random()); doc.add(newStringField(fieldName, s, Field.Store.NO)); // if the default codec doesn't support sortedset, we will uninvert at search time if (defaultCodecSupportsSortedSet()) { @@ -103,8 +103,8 @@ public class TestDocTermOrdsRangeFilter extends LuceneTestCase { public void testRanges() throws Exception { int num = atLeast(1000); for (int i = 0; i < num; i++) { - BytesRef lowerVal = new BytesRef(_TestUtil.randomUnicodeString(random())); - BytesRef upperVal = new BytesRef(_TestUtil.randomUnicodeString(random())); + BytesRef lowerVal = new BytesRef(TestUtil.randomUnicodeString(random())); + BytesRef upperVal = new BytesRef(TestUtil.randomUnicodeString(random())); if (upperVal.compareTo(lowerVal) < 0) { assertSame(upperVal, lowerVal, random().nextBoolean(), 
random().nextBoolean()); } else { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java index c1557be391e..64c5382f3a7 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java @@ -33,10 +33,10 @@ import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.AutomatonTestUtil; import org.apache.lucene.util.automaton.RegExp; import org.apache.lucene.util.UnicodeUtil; -import org.apache.lucene.util._TestUtil; /** * Tests the DocTermOrdsRewriteMethod @@ -55,7 +55,7 @@ public class TestDocTermOrdsRewriteMethod extends LuceneTestCase { fieldName = random().nextBoolean() ? "field" : ""; // sometimes use an empty string as field name RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); List terms = new ArrayList(); int num = atLeast(200); for (int i = 0; i < num; i++) { @@ -63,7 +63,7 @@ public class TestDocTermOrdsRewriteMethod extends LuceneTestCase { doc.add(newStringField("id", Integer.toString(i), Field.Store.NO)); int numTerms = random().nextInt(4); for (int j = 0; j < numTerms; j++) { - String s = _TestUtil.randomUnicodeString(random()); + String s = TestUtil.randomUnicodeString(random()); doc.add(newStringField(fieldName, s, Field.Store.NO)); // if the default codec doesn't support sortedset, we will uninvert at search time if (defaultCodecSupportsSortedSet()) { diff --git 
a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java index 2c4d3453916..8c2148a5012 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java @@ -62,7 +62,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -321,10 +321,10 @@ public class TestFieldCache extends LuceneTestCase { s = unicodeStrings[random().nextInt(i)]; } if (s == null) { - s = _TestUtil.randomUnicodeString(random()); + s = TestUtil.randomUnicodeString(random()); } } else { - s = _TestUtil.randomUnicodeString(random()); + s = TestUtil.randomUnicodeString(random()); } return s; } @@ -694,7 +694,7 @@ public class TestFieldCache extends LuceneTestCase { Document doc = new Document(); LongField field = new LongField("f", 0L, Store.YES); doc.add(field); - final long[] values = new long[_TestUtil.nextInt(random(), 1, 10)]; + final long[] values = new long[TestUtil.nextInt(random(), 1, 10)]; for (int i = 0; i < values.length; ++i) { final long v; switch (random().nextInt(10)) { @@ -708,7 +708,7 @@ public class TestFieldCache extends LuceneTestCase { v = Long.MAX_VALUE; break; default: - v = _TestUtil.nextLong(random(), -10, 10); + v = TestUtil.nextLong(random(), -10, 10); break; } values[i] = v; @@ -740,7 +740,7 @@ public class TestFieldCache extends LuceneTestCase { Document doc = new Document(); IntField field = new IntField("f", 0, Store.YES); doc.add(field); - final int[] values = new int[_TestUtil.nextInt(random(), 1, 10)]; + final int[] values = new int[TestUtil.nextInt(random(), 1, 10)]; for (int i = 0; i < values.length; ++i) { final int v; switch 
(random().nextInt(10)) { @@ -754,7 +754,7 @@ public class TestFieldCache extends LuceneTestCase { v = Integer.MAX_VALUE; break; default: - v = _TestUtil.nextInt(random(), -10, 10); + v = TestUtil.nextInt(random(), -10, 10); break; } values[i] = v; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java index 0889052cca5..d1ec6f58a10 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java @@ -37,7 +37,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.DocIdBitSet; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * FilteredQuery JUnit tests. @@ -392,7 +392,7 @@ public class TestFilteredQuery extends LuceneTestCase { } }; } - return _TestUtil.randomFilterStrategy(random); + return TestUtil.randomFilterStrategy(random); } /* diff --git a/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java b/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java index 5c24a464417..a16e54593f8 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java @@ -32,7 +32,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NamedThreadFactory; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; public class TestIndexSearcher extends LuceneTestCase { @@ -116,7 +116,7 @@ public class TestIndexSearcher extends LuceneTestCase { } } - _TestUtil.shutdownExecutorService(service); + TestUtil.shutdownExecutorService(service); } @Test diff --git 
a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java index 055de012946..2da514f7c4e 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java @@ -39,12 +39,12 @@ import org.apache.lucene.index.StoredDocument; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestLiveFieldValues extends LuceneTestCase { public void test() throws Exception { - Directory dir = newFSDirectory(_TestUtil.getTempDir("livefieldupdates")); + Directory dir = newFSDirectory(TestUtil.getTempDir("livefieldupdates")); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); final IndexWriter w = new IndexWriter(dir, iwc); @@ -73,7 +73,7 @@ public class TestLiveFieldValues extends LuceneTestCase { } }; - int numThreads = _TestUtil.nextInt(random(), 2, 5); + int numThreads = TestUtil.nextInt(random(), 2, 5); if (VERBOSE) { System.out.println(numThreads + " threads"); } @@ -82,7 +82,7 @@ public class TestLiveFieldValues extends LuceneTestCase { List threads = new ArrayList(); final int iters = atLeast(1000); - final int idCount = _TestUtil.nextInt(random(), 100, 10000); + final int idCount = TestUtil.nextInt(random(), 100, 10000); final double reopenChance = random().nextDouble()*0.01; final double deleteChance = random().nextDouble()*0.25; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java index c8e963cb3b1..4ca37226dc0 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java @@ -42,7 +42,7 @@ 
import org.apache.lucene.search.similarities.Similarity.SimWeight; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -107,7 +107,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { private static void addSome(Document doc, String values[]) { List list = Arrays.asList(values); Collections.shuffle(list, random()); - int howMany = _TestUtil.nextInt(random(), 1, list.size()); + int howMany = TestUtil.nextInt(random(), 1, list.size()); for (int i = 0; i < howMany; i++) { doc.add(new StringField("field", list.get(i), Field.Store.NO)); doc.add(new SortedSetDocValuesField("dv", new BytesRef(list.get(i)))); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java index 86c690454fe..6ae70d2e67d 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java @@ -29,7 +29,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; public class TestMultiValuedNumericRangeQuery extends LuceneTestCase { @@ -42,7 +43,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase { Directory directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) - 
.setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.ROOT)); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java index 8dea3dabdb8..150ca077630 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java @@ -37,7 +37,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.TestNumericUtils; // NaN arrays -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -61,7 +61,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 100, 1000)) + .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000)) .setMergePolicy(newLogMergePolicy())); final FieldType storedInt = new FieldType(IntField.TYPE_NOT_STORED); @@ -370,7 +370,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception { String field="field"+precisionStep; int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC; - int num = _TestUtil.nextInt(random(), 10, 20); + int num = TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset; int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset; @@ -493,7 +493,7 @@ public 
class TestNumericRangeQuery32 extends LuceneTestCase { private void testRangeSplit(int precisionStep) throws Exception { String field="ascfield"+precisionStep; // 10 random tests - int num = _TestUtil.nextInt(random(), 10, 20); + int num = TestUtil.nextInt(random(), 10, 20); for (int i =0; i< num; i++) { int lower=(int)(random().nextDouble()*noDocs - noDocs/2); int upper=(int)(random().nextDouble()*noDocs - noDocs/2); @@ -569,7 +569,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { String field="field"+precisionStep; // 10 random tests, the index order is ascending, // so using a reverse sort field should retun descending documents - int num = _TestUtil.nextInt(random(), 10, 20); + int num = TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset; int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java index 3f17c65f8f1..961598d2665 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java @@ -37,7 +37,8 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.TestNumericUtils; // NaN arrays -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -61,7 +62,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) - 
.setMaxBufferedDocs(_TestUtil.nextInt(random(), 100, 1000)) + .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000)) .setMergePolicy(newLogMergePolicy())); final FieldType storedLong = new FieldType(LongField.TYPE_NOT_STORED); @@ -397,7 +398,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception { String field="field"+precisionStep; int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC; - int num = _TestUtil.nextInt(random(), 10, 20); + int num = TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset; long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset; @@ -525,7 +526,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { private void testRangeSplit(int precisionStep) throws Exception { String field="ascfield"+precisionStep; // 10 random tests - int num = _TestUtil.nextInt(random(), 10, 20); + int num = TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { long lower=(long)(random().nextDouble()*noDocs - noDocs/2); long upper=(long)(random().nextDouble()*noDocs - noDocs/2); @@ -611,7 +612,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { String field="field"+precisionStep; // 10 random tests, the index order is ascending, // so using a reverse sort field should retun descending documents - int num = _TestUtil.nextInt(random(), 10, 20); + int num = TestUtil.nextInt(random(), 10, 20); for (int i = 0; i < num; i++) { long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset; long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java index a2dbdadc2a9..d96dad86f7b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ 
b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -31,8 +31,6 @@ import org.apache.lucene.util.*; import org.junit.AfterClass; import org.junit.BeforeClass; -import com.carrotsearch.randomizedtesting.annotations.Seed; - /** * Tests {@link PhraseQuery}. * @@ -602,7 +600,7 @@ public class TestPhraseQuery extends LuceneTestCase { int NUM_DOCS = atLeast(10); for (int i = 0; i < NUM_DOCS; i++) { // must be > 4096 so it spans multiple chunks - int termCount = _TestUtil.nextInt(random(), 4097, 8200); + int termCount = TestUtil.nextInt(random(), 4097, 8200); List doc = new ArrayList(); @@ -612,7 +610,7 @@ public class TestPhraseQuery extends LuceneTestCase { // make new non-empty-string term String term; while(true) { - term = _TestUtil.randomUnicodeString(r); + term = TestUtil.randomUnicodeString(r); if (term.length() > 0) { break; } @@ -630,7 +628,7 @@ public class TestPhraseQuery extends LuceneTestCase { } else { // pick existing sub-phrase List lastDoc = docs.get(r.nextInt(docs.size())); - int len = _TestUtil.nextInt(r, 1, 10); + int len = TestUtil.nextInt(r, 1, 10); int start = r.nextInt(lastDoc.size()-len); for(int k=start;k doc = docs.get(docID); - final int numTerm = _TestUtil.nextInt(r, 2, 20); + final int numTerm = TestUtil.nextInt(r, 2, 20); final int start = r.nextInt(doc.size()-numTerm); PhraseQuery pq = new PhraseQuery(); StringBuilder sb = new StringBuilder(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java index 6a4228f1fa3..5501ce713a8 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java @@ -34,7 +34,8 @@ import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.StringHelper; -import org.apache.lucene.util._TestUtil; +import 
org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; /** * Create an index with random unicode terms @@ -51,7 +52,7 @@ public class TestPrefixRandom extends LuceneTestCase { dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); Field field = newStringField("field", "", Field.Store.NO); @@ -59,7 +60,7 @@ public class TestPrefixRandom extends LuceneTestCase { int num = atLeast(1000); for (int i = 0; i < num; i++) { - field.setStringValue(_TestUtil.randomUnicodeString(random(), 10)); + field.setStringValue(TestUtil.randomUnicodeString(random(), 10)); writer.addDocument(doc); } reader = writer.getReader(); @@ -113,7 +114,7 @@ public class TestPrefixRandom extends LuceneTestCase { public void testPrefixes() throws Exception { int num = atLeast(100); for (int i = 0; i < num; i++) - assertSame(_TestUtil.randomUnicodeString(random(), 5)); + assertSame(TestUtil.randomUnicodeString(random(), 5)); } /** check that the # of hits is the same as from a very diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java index 97015d5f2ee..95760e19d55 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java @@ -32,7 +32,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Create an index with terms from 000-999. 
@@ -50,7 +50,7 @@ public class TestRegexpRandom extends LuceneTestCase { dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); FieldType customType = new FieldType(TextField.TYPE_STORED); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java index 54ab48aa7d5..582e00f9e35 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java @@ -37,8 +37,8 @@ import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.UnicodeUtil; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.AutomatonTestUtil; import org.apache.lucene.util.automaton.CharacterRunAutomaton; @@ -62,14 +62,14 @@ public class TestRegexpRandom2 extends LuceneTestCase { fieldName = random().nextBoolean() ? 
"field" : ""; // sometimes use an empty string as field name RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 50, 1000))); + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); Document doc = new Document(); Field field = newStringField(fieldName, "", Field.Store.NO); doc.add(field); List terms = new ArrayList(); int num = atLeast(200); for (int i = 0; i < num; i++) { - String s = _TestUtil.randomUnicodeString(random()); + String s = TestUtil.randomUnicodeString(random()); field.setStringValue(s); terms.add(s); writer.addDocument(doc); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java index 3935add5dde..2356e91be35 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java @@ -35,8 +35,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.RamUsageEstimator; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; public class TestSameScoresWithThreads extends LuceneTestCase { @@ -80,7 +80,7 @@ public class TestSameScoresWithThreads extends LuceneTestCase { if (!answers.isEmpty()) { final CountDownLatch startingGun = new CountDownLatch(1); - int numThreads = _TestUtil.nextInt(random(), 2, 5); + int numThreads = TestUtil.nextInt(random(), 2, 5); Thread[] threads = new Thread[numThreads]; for(int threadID=0;threadID seen = new HashSet(); - final int maxLength = _TestUtil.nextInt(random, 5, 100); + final int maxLength = TestUtil.nextInt(random, 5, 100); 
if (VERBOSE) { System.out.println("TEST: NUM_DOCS=" + NUM_DOCS + " maxLength=" + maxLength + " allowDups=" + allowDups); } @@ -69,9 +70,9 @@ public class TestSortRandom extends LuceneTestCase { if (random().nextInt(10) != 7) { final String s; if (random.nextBoolean()) { - s = _TestUtil.randomSimpleString(random, maxLength); + s = TestUtil.randomSimpleString(random, maxLength); } else { - s = _TestUtil.randomUnicodeString(random, maxLength); + s = TestUtil.randomUnicodeString(random, maxLength); } if (!allowDups) { @@ -145,7 +146,7 @@ public class TestSortRandom extends LuceneTestCase { } else { sort = new Sort(sf, SortField.FIELD_DOC); } - final int hitCount = _TestUtil.nextInt(random, 1, r.maxDoc() + 20); + final int hitCount = TestUtil.nextInt(random, 1, r.maxDoc() + 20); final RandomFilter f = new RandomFilter(random, random.nextFloat(), docValues); int queryType = random.nextInt(3); if (queryType == 0) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java index 4c49c3d28e7..1dcd43d09bb 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java @@ -35,7 +35,8 @@ import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; public class TestTopDocsMerge extends LuceneTestCase { @@ -86,7 +87,7 @@ public class TestTopDocsMerge extends LuceneTestCase { for(int contentIDX=0;contentIDX primaryExtensions) throws IOException { - File primDir = _TestUtil.getTempDir("foo"); - File secondDir = _TestUtil.getTempDir("bar"); + File primDir = TestUtil.getTempDir("foo"); + File secondDir = TestUtil.getTempDir("bar"); return newFSSwitchDirectory(primDir, 
secondDir, primaryExtensions); } @@ -102,10 +102,10 @@ public class TestFileSwitchDirectory extends LuceneTestCase { // LUCENE-3380 -- make sure we get exception if the directory really does not exist. public void testNoDir() throws Throwable { - File primDir = _TestUtil.getTempDir("foo"); - File secondDir = _TestUtil.getTempDir("bar"); - _TestUtil.rmDir(primDir); - _TestUtil.rmDir(secondDir); + File primDir = TestUtil.getTempDir("foo"); + File secondDir = TestUtil.getTempDir("bar"); + TestUtil.rmDir(primDir); + TestUtil.rmDir(secondDir); Directory dir = newFSSwitchDirectory(primDir, secondDir, Collections.emptySet()); try { DirectoryReader.open(dir); diff --git a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java index 6f8345dd917..137ed923463 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java @@ -36,7 +36,8 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; public class TestLockFactory extends LuceneTestCase { @@ -135,7 +136,7 @@ public class TestLockFactory extends LuceneTestCase { // no unexpected exceptions are raised: @Nightly public void testStressLocks() throws Exception { - _testStressLocks(null, _TestUtil.getTempDir("index.TestLockFactory6")); + _testStressLocks(null, TestUtil.getTempDir("index.TestLockFactory6")); } // Verify: do stress test, by opening IndexReaders and @@ -144,7 +145,7 @@ public class TestLockFactory extends LuceneTestCase { // NativeFSLockFactory: @Nightly public void testStressLocksNativeFSLockFactory() throws Exception { - File dir = _TestUtil.getTempDir("index.TestLockFactory7"); + File dir = 
TestUtil.getTempDir("index.TestLockFactory7"); _testStressLocks(new NativeFSLockFactory(dir), dir); } @@ -170,7 +171,7 @@ public class TestLockFactory extends LuceneTestCase { dir.close(); // Cleanup - _TestUtil.rmDir(indexDir); + TestUtil.rmDir(indexDir); } // Verify: NativeFSLockFactory works correctly @@ -237,8 +238,8 @@ public class TestLockFactory extends LuceneTestCase { // Verify: NativeFSLockFactory assigns null as lockPrefix if the lockDir is inside directory public void testNativeFSLockFactoryPrefix() throws IOException { - File fdir1 = _TestUtil.getTempDir("TestLockFactory.8"); - File fdir2 = _TestUtil.getTempDir("TestLockFactory.8.Lockdir"); + File fdir1 = TestUtil.getTempDir("TestLockFactory.8"); + File fdir2 = TestUtil.getTempDir("TestLockFactory.8.Lockdir"); Directory dir1 = newFSDirectory(fdir1, new NativeFSLockFactory(fdir1)); // same directory, but locks are stored somewhere else. The prefix of the lock factory should != null Directory dir2 = newFSDirectory(fdir1, new NativeFSLockFactory(fdir2)); @@ -251,8 +252,8 @@ public class TestLockFactory extends LuceneTestCase { dir1.close(); dir2.close(); - _TestUtil.rmDir(fdir1); - _TestUtil.rmDir(fdir2); + TestUtil.rmDir(fdir1); + TestUtil.rmDir(fdir2); } // Verify: default LockFactory has no prefix (ie @@ -260,7 +261,7 @@ public class TestLockFactory extends LuceneTestCase { public void testDefaultFSLockFactoryPrefix() throws IOException { // Make sure we get null prefix, which wont happen if setLockFactory is ever called. 
- File dirName = _TestUtil.getTempDir("TestLockFactory.10"); + File dirName = TestUtil.getTempDir("TestLockFactory.10"); Directory dir = new SimpleFSDirectory(dirName); assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix()); @@ -274,7 +275,7 @@ public class TestLockFactory extends LuceneTestCase { assertNull("Default lock prefix should be null", dir.getLockFactory().getLockPrefix()); dir.close(); - _TestUtil.rmDir(dirName); + TestUtil.rmDir(dirName); } private class WriterThread extends Thread { diff --git a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java index dbc34618903..9ddd7155b5a 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java @@ -28,7 +28,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory.IndexInputSlicer; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests MMapDirectory's MultiMMapIndexInput @@ -44,12 +44,12 @@ public class TestMultiMMap extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); assumeTrue("test requires a jre that supports unmapping", MMapDirectory.UNMAP_SUPPORTED); - workDir = _TestUtil.getTempDir("TestMultiMMap"); + workDir = TestUtil.getTempDir("TestMultiMMap"); workDir.mkdirs(); } public void testCloneSafety() throws Exception { - MMapDirectory mmapDir = new MMapDirectory(_TestUtil.getTempDir("testCloneSafety")); + MMapDirectory mmapDir = new MMapDirectory(TestUtil.getTempDir("testCloneSafety")); IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random())); io.writeVInt(5); io.close(); @@ -83,7 +83,7 @@ public class TestMultiMMap extends LuceneTestCase { } public void testCloneClose() throws Exception { - MMapDirectory mmapDir 
= new MMapDirectory(_TestUtil.getTempDir("testCloneClose")); + MMapDirectory mmapDir = new MMapDirectory(TestUtil.getTempDir("testCloneClose")); IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random())); io.writeVInt(5); io.close(); @@ -105,7 +105,7 @@ public class TestMultiMMap extends LuceneTestCase { } public void testCloneSliceSafety() throws Exception { - MMapDirectory mmapDir = new MMapDirectory(_TestUtil.getTempDir("testCloneSliceSafety")); + MMapDirectory mmapDir = new MMapDirectory(TestUtil.getTempDir("testCloneSliceSafety")); IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random())); io.writeInt(1); io.writeInt(2); @@ -150,7 +150,7 @@ public class TestMultiMMap extends LuceneTestCase { } public void testCloneSliceClose() throws Exception { - MMapDirectory mmapDir = new MMapDirectory(_TestUtil.getTempDir("testCloneSliceClose")); + MMapDirectory mmapDir = new MMapDirectory(TestUtil.getTempDir("testCloneSliceClose")); IndexOutput io = mmapDir.createOutput("bytes", newIOContext(random())); io.writeInt(1); io.writeInt(2); @@ -177,7 +177,7 @@ public class TestMultiMMap extends LuceneTestCase { public void testSeekZero() throws Exception { for (int i = 0; i < 31; i++) { - MMapDirectory mmapDir = new MMapDirectory(_TestUtil.getTempDir("testSeekZero"), null, 1< { FixedBitSet b0=null; for (int i=0; i { // test ensureCapacityWords int numWords = random().nextInt(10) + 2; // make sure we grow the array (at least 128 bits) bits.ensureCapacityWords(numWords); - bit = _TestUtil.nextInt(random(), 127, (numWords << 6)-1); // pick a bit >= to 128, but still within range + bit = TestUtil.nextInt(random(), 127, (numWords << 6) - 1); // pick a bit >= to 128, but still within range bits.fastSet(bit); assertTrue(bits.fastGet(bit)); bits.fastClear(bit); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestPForDeltaDocIdSet.java b/lucene/core/src/test/org/apache/lucene/util/TestPForDeltaDocIdSet.java index b1e847bc393..0439730de4e 100644 --- 
a/lucene/core/src/test/org/apache/lucene/util/TestPForDeltaDocIdSet.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestPForDeltaDocIdSet.java @@ -24,7 +24,7 @@ public class TestPForDeltaDocIdSet extends BaseDocIdSetTestCase(arr, ArrayUtil.naturalComparator(), _TestUtil.nextInt(random(), 0, arr.length)); + return new ArrayTimSorter(arr, ArrayUtil.naturalComparator(), TestUtil.nextInt(random(), 0, arr.length)); } } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java b/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java index 688469c163c..139c2e26d63 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java @@ -111,7 +111,7 @@ public class TestUnicodeUtil extends LuceneTestCase { BytesRef utf8 = new BytesRef(20); int num = atLeast(50000); for (int i = 0; i < num; i++) { - final String s = _TestUtil.randomUnicodeString(random()); + final String s = TestUtil.randomUnicodeString(random()); UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8); assertEquals(s.codePointCount(0, s.length()), UnicodeUtil.codePointCount(utf8)); @@ -142,7 +142,7 @@ public class TestUnicodeUtil extends LuceneTestCase { int[] codePoints = new int[20]; int num = atLeast(50000); for (int i = 0; i < num; i++) { - final String s = _TestUtil.randomUnicodeString(random()); + final String s = TestUtil.randomUnicodeString(random()); UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8); UnicodeUtil.UTF8toUTF32(utf8, utf32); @@ -208,7 +208,7 @@ public class TestUnicodeUtil extends LuceneTestCase { public void testUTF8UTF16CharsRef() { int num = atLeast(3989); for (int i = 0; i < num; i++) { - String unicode = _TestUtil.randomRealisticUnicodeString(random()); + String unicode = TestUtil.randomRealisticUnicodeString(random()); BytesRef ref = new BytesRef(unicode); char[] arr = new char[1 + random().nextInt(100)]; int offset = random().nextInt(arr.length); diff --git 
a/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java b/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java index 9874d970d87..de135db5954 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java @@ -26,7 +26,7 @@ public class TestWAH8DocIdSet extends BaseDocIdSetTestCase { @Override public WAH8DocIdSet copyOf(BitSet bs, int length) throws IOException { - final int indexInterval = _TestUtil.nextInt(random(), 8, 256); + final int indexInterval = TestUtil.nextInt(random(), 8, 256); final WAH8DocIdSet.Builder builder = new WAH8DocIdSet.Builder().setIndexInterval(indexInterval); for (int i = bs.nextSetBit(0); i != -1; i = bs.nextSetBit(i + 1)) { builder.add(i); @@ -42,8 +42,8 @@ public class TestWAH8DocIdSet extends BaseDocIdSetTestCase { } public void testUnion() throws IOException { - final int numBits = _TestUtil.nextInt(random(), 100, 1 << 20); - final int numDocIdSets = _TestUtil.nextInt(random(), 0, 4); + final int numBits = TestUtil.nextInt(random(), 100, 1 << 20); + final int numDocIdSets = TestUtil.nextInt(random(), 0, 4); final List fixedSets = new ArrayList(numDocIdSets); for (int i = 0; i < numDocIdSets; ++i) { fixedSets.add(randomSet(numBits, random().nextFloat() / 16)); @@ -64,8 +64,8 @@ public class TestWAH8DocIdSet extends BaseDocIdSetTestCase { } public void testIntersection() throws IOException { - final int numBits = _TestUtil.nextInt(random(), 100, 1 << 20); - final int numDocIdSets = _TestUtil.nextInt(random(), 1, 4); + final int numBits = TestUtil.nextInt(random(), 100, 1 << 20); + final int numDocIdSets = TestUtil.nextInt(random(), 1, 4); final List fixedSets = new ArrayList(numDocIdSets); for (int i = 0; i < numDocIdSets; ++i) { fixedSets.add(randomSet(numBits, random().nextFloat())); diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java 
b/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java index b2092011cce..622aa9d835b 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java @@ -28,7 +28,7 @@ public class TestBasicOperations extends LuceneTestCase { public void testStringUnion() { List strings = new ArrayList(); for (int i = RandomInts.randomIntBetween(random(), 0, 1000); --i >= 0;) { - strings.add(new BytesRef(_TestUtil.randomUnicodeString(random()))); + strings.add(new BytesRef(TestUtil.randomUnicodeString(random()))); } Collections.sort(strings); diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java index ae27033c345..b45ee8f7d93 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java @@ -26,7 +26,7 @@ import java.util.Set; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestCompiledAutomaton extends LuceneTestCase { @@ -104,7 +104,7 @@ public class TestCompiledAutomaton extends LuceneTestCase { private String randomString() { // return _TestUtil.randomSimpleString(random); - return _TestUtil.randomRealisticUnicodeString(random()); + return TestUtil.randomRealisticUnicodeString(random()); } public void testBasic() throws Exception { diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java index 69e09462441..1bf01e7dce9 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java +++ 
b/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java @@ -22,7 +22,7 @@ import java.util.Collections; import java.util.List; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Not thorough, but tries to test determinism correctness @@ -38,7 +38,7 @@ public class TestDeterminizeLexicon extends LuceneTestCase { automata.clear(); terms.clear(); for (int j = 0; j < 5000; j++) { - String randomString = _TestUtil.randomUnicodeString(random()); + String randomString = TestUtil.randomUnicodeString(random()); terms.add(randomString); automata.add(BasicAutomata.makeString(randomString)); } diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestUTF32ToUTF8.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestUTF32ToUTF8.java index eeff2613d8e..68c75a1f8cb 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestUTF32ToUTF8.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestUTF32ToUTF8.java @@ -18,7 +18,7 @@ package org.apache.lucene.util.automaton; */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; @@ -90,7 +90,7 @@ public class TestUTF32ToUTF8 extends LuceneTestCase { final int invalidRange = MAX_UNICODE - (endCode - startCode + 1); if (invalidRange > 0) { for(int iter=0;iter= startCode) { code = endCode + 1 + x - startCode; @@ -114,13 +114,13 @@ public class TestUTF32ToUTF8 extends LuceneTestCase { private int getCodeStart(Random r) { switch(r.nextInt(4)) { case 0: - return _TestUtil.nextInt(r, 0, 128); + return TestUtil.nextInt(r, 0, 128); case 1: - return _TestUtil.nextInt(r, 128, 2048); + return TestUtil.nextInt(r, 128, 2048); case 2: - return _TestUtil.nextInt(r, 2048, 65536); + return TestUtil.nextInt(r, 2048, 65536); default: - return 
_TestUtil.nextInt(r, 65536, 1+MAX_UNICODE); + return TestUtil.nextInt(r, 65536, 1 + MAX_UNICODE); } } @@ -218,7 +218,7 @@ public class TestUTF32ToUTF8 extends LuceneTestCase { final String string; if (random().nextBoolean()) { // likely not accepted - string = _TestUtil.randomUnicodeString(random()); + string = TestUtil.randomUnicodeString(random()); } else { // will be accepted int[] codepoints = ras.getRandomAcceptedString(random()); diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java b/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java index a149ed63bab..21feb91bfc3 100644 --- a/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java +++ b/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java @@ -28,8 +28,8 @@ import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.TimeUnits; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.packed.PackedInts; import org.junit.Ignore; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; @@ -45,7 +45,7 @@ public class Test2BFST extends LuceneTestCase { IntsRef input = new IntsRef(ints, 0, ints.length); long seed = random().nextLong(); - Directory dir = new MMapDirectory(_TestUtil.getTempDir("2BFST")); + Directory dir = new MMapDirectory(TestUtil.getTempDir("2BFST")); for(int doPackIter=0;doPackIter<2;doPackIter++) { boolean doPack = doPackIter == 1; diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestBytesStore.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestBytesStore.java index 7b598ed69a8..390bef8b46f 100644 --- a/lucene/core/src/test/org/apache/lucene/util/fst/TestBytesStore.java +++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestBytesStore.java @@ -24,7 +24,7 @@ import org.apache.lucene.store.IOContext; import 
org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestBytesStore extends LuceneTestCase { @@ -32,9 +32,9 @@ public class TestBytesStore extends LuceneTestCase { final int iters = atLeast(10); for(int iter=0;iter 1) { - int numOps = _TestUtil.nextInt(random(), 100, 200); + int numOps = TestUtil.nextInt(random(), 100, 200); for(int op=0;op> pairs = new ArrayList>(terms.length); long lastOutput = 0; for(int idx=0;idx(terms[idx], value)); } @@ -197,7 +197,7 @@ public class TestFSTs extends LuceneTestCase { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); final List> pairs = new ArrayList>(terms.length); for(int idx=0;idx(terms[idx], _TestUtil.nextLong(random(), 0, Long.MAX_VALUE))); + pairs.add(new FSTTester.InputOutput(terms[idx], TestUtil.nextLong(random(), 0, Long.MAX_VALUE))); } new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(true); } @@ -210,7 +210,7 @@ public class TestFSTs extends LuceneTestCase { final List>> pairs = new ArrayList>>(terms.length); long lastOutput = 0; for(int idx=0;idx>(terms[idx], outputs.newPair((long) idx, value))); @@ -283,7 +283,7 @@ public class TestFSTs extends LuceneTestCase { @Nightly public void testBigSet() throws IOException { - testRandomWords(_TestUtil.nextInt(random(), 50000, 60000), 1); + testRandomWords(TestUtil.nextInt(random(), 50000, 60000), 1); } // Build FST for all unique terms in the test line docs @@ -293,7 +293,7 @@ public class TestFSTs extends LuceneTestCase { final LineFileDocs docs = new LineFileDocs(random(), true); final int RUN_TIME_MSEC = atLeast(500); final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64); - final File tempDir = _TestUtil.getTempDir("fstlines"); + final File tempDir = 
TestUtil.getTempDir("fstlines"); final Directory dir = newFSDirectory(tempDir); final IndexWriter writer = new IndexWriter(dir, conf); final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC; @@ -663,7 +663,7 @@ public class TestFSTs extends LuceneTestCase { rand = new Random(17); } return outputs.newPair((long) ord, - (long) _TestUtil.nextInt(rand, 1, 5000)); + (long) TestUtil.nextInt(rand, 1, 5000)); } }.run(limit, verify, false); } else if (storeOrds) { @@ -685,7 +685,7 @@ public class TestFSTs extends LuceneTestCase { if (ord == 0) { rand = new Random(17); } - return (long) _TestUtil.nextInt(rand, 1, 5000); + return (long) TestUtil.nextInt(rand, 1, 5000); } }.run(limit, verify, false); } else { @@ -1302,7 +1302,7 @@ public class TestFSTs extends LuceneTestCase { for (int i = 0; i < numWords; i++) { String s; while (true) { - s = _TestUtil.randomSimpleString(random); + s = TestUtil.randomSimpleString(random); if (!slowCompletor.containsKey(s)) { break; } @@ -1311,7 +1311,7 @@ public class TestFSTs extends LuceneTestCase { for (int j = 1; j < s.length(); j++) { allPrefixes.add(s.substring(0, j)); } - int weight = _TestUtil.nextInt(random, 1, 100); // weights 1..100 + int weight = TestUtil.nextInt(random, 1, 100); // weights 1..100 slowCompletor.put(s, (long)weight); } @@ -1342,7 +1342,7 @@ public class TestFSTs extends LuceneTestCase { prefixOutput += arc.output; } - final int topN = _TestUtil.nextInt(random, 1, 10); + final int topN = TestUtil.nextInt(random, 1, 10); Util.MinResult[] r = Util.shortestPaths(fst, arc, fst.outputs.getNoOutput(), minLongComparator, topN, true); @@ -1420,7 +1420,7 @@ public class TestFSTs extends LuceneTestCase { for (int i = 0; i < numWords; i++) { String s; while (true) { - s = _TestUtil.randomSimpleString(random); + s = TestUtil.randomSimpleString(random); if (!slowCompletor.containsKey(s)) { break; } @@ -1429,8 +1429,8 @@ public class TestFSTs extends LuceneTestCase { for (int j = 1; j < s.length(); j++) { 
allPrefixes.add(s.substring(0, j)); } - int weight = _TestUtil.nextInt(random, 1, 100); // weights 1..100 - int output = _TestUtil.nextInt(random, 0, 500); // outputs 0..500 + int weight = TestUtil.nextInt(random, 1, 100); // weights 1..100 + int output = TestUtil.nextInt(random, 0, 500); // outputs 0..500 slowCompletor.put(s, new TwoLongs(weight, output)); } @@ -1463,7 +1463,7 @@ public class TestFSTs extends LuceneTestCase { prefixOutput = outputs.add(prefixOutput, arc.output); } - final int topN = _TestUtil.nextInt(random, 1, 10); + final int topN = TestUtil.nextInt(random, 1, 10); Util.MinResult>[] r = Util.shortestPaths(fst, arc, fst.outputs.getNoOutput(), minPairWeightComparator, topN, true); diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java index e749b290a02..9e9c7393443 100644 --- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java @@ -19,7 +19,7 @@ package org.apache.lucene.util.junitcompat; import java.io.File; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Assert; import org.junit.Test; import org.junit.runner.JUnitCore; @@ -33,7 +33,7 @@ public class TestLeaveFilesIfTestFails extends WithNestedTests { public static class Nested1 extends WithNestedTests.AbstractNestedTest { static File file; public void testDummy() { - file = _TestUtil.getTempDir("leftover"); + file = TestUtil.getTempDir("leftover"); file.mkdirs(); fail(); } diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSameRandomnessLocalePassedOrNot.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSameRandomnessLocalePassedOrNot.java index 191348da2fb..de92566ae1b 100644 --- 
a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSameRandomnessLocalePassedOrNot.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSameRandomnessLocalePassedOrNot.java @@ -2,7 +2,7 @@ package org.apache.lucene.util.junitcompat; import java.util.*; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.*; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @@ -53,7 +53,7 @@ public class TestSameRandomnessLocalePassedOrNot extends WithNestedTests { seed = RandomizedContext.current().getRunnerSeedAsString(); Random rnd = random(); - pickString = _TestUtil.randomSimpleString(rnd); + pickString = TestUtil.randomSimpleString(rnd); defaultLocale = Locale.getDefault(); defaultTimeZone = TimeZone.getDefault(); diff --git a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java index fc859148f9d..d997f38f913 100644 --- a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java +++ b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java @@ -38,7 +38,7 @@ import org.apache.lucene.util.LongsRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.lucene.util.RamUsageEstimator; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.packed.PackedInts.Reader; import org.junit.Ignore; @@ -89,10 +89,10 @@ public class TestPackedInts extends LuceneTestCase { for (int iter = 0; iter < num; iter++) { for(int nbits=1;nbits<=64;nbits++) { final long maxValue = PackedInts.maxValue(nbits); - final int valueCount = _TestUtil.nextInt(random(), 1, 600); + final int valueCount = TestUtil.nextInt(random(), 1, 600); final int bufferSize = random().nextBoolean() - ? _TestUtil.nextInt(random(), 0, 48) - : _TestUtil.nextInt(random(), 0, 4096); + ? 
TestUtil.nextInt(random(), 0, 48) + : TestUtil.nextInt(random(), 0, 4096); final Directory d = newDirectory(); IndexOutput out = d.createOutput("out.bin", newIOContext(random())); @@ -106,13 +106,13 @@ public class TestPackedInts extends LuceneTestCase { PackedInts.Writer w = PackedInts.getWriter(out, valueCount, nbits, acceptableOverhead); final long startFp = out.getFilePointer(); - final int actualValueCount = random().nextBoolean() ? valueCount : _TestUtil.nextInt(random(), 0, valueCount); + final int actualValueCount = random().nextBoolean() ? valueCount : TestUtil.nextInt(random(), 0, valueCount); final long[] values = new long[valueCount]; for(int i=0;i packedInts = createPackedInts(valueCount, bpv); for (PackedInts.Mutable ints : packedInts) { String msg = ints.getClass().getSimpleName() + " bpv=" + bpv + ", from=" + from + ", to=" + to + ", val=" + val; @@ -547,9 +547,9 @@ public class TestPackedInts extends LuceneTestCase { public void testPackedIntsNull() { // must be > 10 for the bulk reads below - int size = _TestUtil.nextInt(random(), 11, 256); + int size = TestUtil.nextInt(random(), 11, 256); Reader packedInts = new PackedInts.NullReader(size); - assertEquals(0, packedInts.get(_TestUtil.nextInt(random(), 0, size - 1))); + assertEquals(0, packedInts.get(TestUtil.nextInt(random(), 0, size - 1))); long[] arr = new long[size + 10]; int r; Arrays.fill(arr, 1); @@ -570,7 +570,7 @@ public class TestPackedInts extends LuceneTestCase { public void testBulkGet() { final int valueCount = 1111; final int index = random().nextInt(valueCount); - final int len = _TestUtil.nextInt(random(), 1, valueCount * 2); + final int len = TestUtil.nextInt(random(), 1, valueCount * 2); final int off = random().nextInt(77); for (int bpv = 1; bpv <= 64; ++bpv) { @@ -605,7 +605,7 @@ public class TestPackedInts extends LuceneTestCase { public void testBulkSet() { final int valueCount = 1111; final int index = random().nextInt(valueCount); - final int len = 
_TestUtil.nextInt(random(), 1, valueCount * 2); + final int len = TestUtil.nextInt(random(), 1, valueCount * 2); final int off = random().nextInt(77); long[] arr = new long[off+len]; @@ -636,7 +636,7 @@ public class TestPackedInts extends LuceneTestCase { } public void testCopy() { - final int valueCount = _TestUtil.nextInt(random(), 5, 600); + final int valueCount = TestUtil.nextInt(random(), 5, 600); final int off1 = random().nextInt(valueCount); final int off2 = random().nextInt(valueCount); final int len = random().nextInt(Math.min(valueCount - off1, valueCount - off2)); @@ -691,9 +691,9 @@ public class TestPackedInts extends LuceneTestCase { } public void testPagedGrowableWriter() { - int pageSize = 1 << (_TestUtil.nextInt(random(), 6, 30)); + int pageSize = 1 << (TestUtil.nextInt(random(), 6, 30)); // supports 0 values? - PagedGrowableWriter writer = new PagedGrowableWriter(0, pageSize, _TestUtil.nextInt(random(), 1, 64), random().nextFloat()); + PagedGrowableWriter writer = new PagedGrowableWriter(0, pageSize, TestUtil.nextInt(random(), 1, 64), random().nextFloat()); assertEquals(0, writer.size()); // compare against AppendingDeltaPackedLongBuffer @@ -701,12 +701,12 @@ public class TestPackedInts extends LuceneTestCase { int size = random().nextInt(1000000); long max = 5; for (int i = 0; i < size; ++i) { - buf.add(_TestUtil.nextLong(random(), 0, max)); + buf.add(TestUtil.nextLong(random(), 0, max)); if (rarely()) { - max = PackedInts.maxValue(rarely() ? _TestUtil.nextInt(random(), 0, 63) : _TestUtil.nextInt(random(), 0, 31)); + max = PackedInts.maxValue(rarely() ? 
TestUtil.nextInt(random(), 0, 63) : TestUtil.nextInt(random(), 0, 31)); } } - writer = new PagedGrowableWriter(size, pageSize, _TestUtil.nextInt(random(), 1, 64), random().nextFloat()); + writer = new PagedGrowableWriter(size, pageSize, TestUtil.nextInt(random(), 1, 64), random().nextFloat()); assertEquals(size, writer.size()); for (int i = size - 1; i >= 0; --i) { writer.set(i, buf.get(i)); @@ -719,7 +719,7 @@ public class TestPackedInts extends LuceneTestCase { assertEquals(RamUsageEstimator.sizeOf(writer), writer.ramBytesUsed(), 8); // test copy - PagedGrowableWriter copy = writer.resize(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); + PagedGrowableWriter copy = writer.resize(TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); for (long i = 0; i < copy.size(); ++i) { if (i < writer.size()) { assertEquals(writer.get(i), copy.get(i)); @@ -729,7 +729,7 @@ public class TestPackedInts extends LuceneTestCase { } // test grow - PagedGrowableWriter grow = writer.grow(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); + PagedGrowableWriter grow = writer.grow(TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); for (long i = 0; i < grow.size(); ++i) { if (i < writer.size()) { assertEquals(writer.get(i), grow.get(i)); @@ -740,9 +740,9 @@ public class TestPackedInts extends LuceneTestCase { } public void testPagedMutable() { - final int bitsPerValue = _TestUtil.nextInt(random(), 1, 64); + final int bitsPerValue = TestUtil.nextInt(random(), 1, 64); final long max = PackedInts.maxValue(bitsPerValue); - int pageSize = 1 << (_TestUtil.nextInt(random(), 6, 30)); + int pageSize = 1 << (TestUtil.nextInt(random(), 6, 30)); // supports 0 values? 
PagedMutable writer = new PagedMutable(0, pageSize, bitsPerValue, random().nextFloat() / 2); assertEquals(0, writer.size()); @@ -752,7 +752,7 @@ public class TestPackedInts extends LuceneTestCase { int size = random().nextInt(1000000); for (int i = 0; i < size; ++i) { - buf.add(bitsPerValue == 64 ? random().nextLong() : _TestUtil.nextLong(random(), 0, max)); + buf.add(bitsPerValue == 64 ? random().nextLong() : TestUtil.nextLong(random(), 0, max)); } writer = new PagedMutable(size, pageSize, bitsPerValue, random().nextFloat()); assertEquals(size, writer.size()); @@ -767,7 +767,7 @@ public class TestPackedInts extends LuceneTestCase { assertEquals(RamUsageEstimator.sizeOf(writer) - RamUsageEstimator.sizeOf(writer.format), writer.ramBytesUsed()); // test copy - PagedMutable copy = writer.resize(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); + PagedMutable copy = writer.resize(TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); for (long i = 0; i < copy.size(); ++i) { if (i < writer.size()) { assertEquals(writer.get(i), copy.get(i)); @@ -777,7 +777,7 @@ public class TestPackedInts extends LuceneTestCase { } // test grow - PagedMutable grow = writer.grow(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); + PagedMutable grow = writer.grow(TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2)); for (long i = 0; i < grow.size(); ++i) { if (i < writer.size()) { assertEquals(writer.get(i), grow.get(i)); @@ -790,14 +790,14 @@ public class TestPackedInts extends LuceneTestCase { // memory hole @Ignore public void testPagedGrowableWriterOverflow() { - final long size = _TestUtil.nextLong(random(), 2 * (long) Integer.MAX_VALUE, 3 * (long) Integer.MAX_VALUE); - final int pageSize = 1 << (_TestUtil.nextInt(random(), 16, 30)); + final long size = TestUtil.nextLong(random(), 2 * (long) Integer.MAX_VALUE, 3 * (long) Integer.MAX_VALUE); + final int pageSize = 1 << (TestUtil.nextInt(random(), 16, 30)); 
final PagedGrowableWriter writer = new PagedGrowableWriter(size, pageSize, 1, random().nextFloat()); - final long index = _TestUtil.nextLong(random(), (long) Integer.MAX_VALUE, size - 1); + final long index = TestUtil.nextLong(random(), (long) Integer.MAX_VALUE, size - 1); writer.set(index, 2); assertEquals(2, writer.get(index)); for (int i = 0; i < 1000000; ++i) { - final long idx = _TestUtil.nextLong(random(), 0, size); + final long idx = TestUtil.nextLong(random(), 0, size); if (idx == index) { assertEquals(2, writer.get(idx)); } else { @@ -807,7 +807,7 @@ public class TestPackedInts extends LuceneTestCase { } public void testSave() throws IOException { - final int valueCount = _TestUtil.nextInt(random(), 1, 2048); + final int valueCount = TestUtil.nextInt(random(), 1, 2048); for (int bpv = 1; bpv <= 64; ++bpv) { final int maxValue = (int) Math.min(PackedInts.maxValue(31), PackedInts.maxValue(bpv)); final RAMDirectory directory = new RAMDirectory(); @@ -964,9 +964,9 @@ public class TestPackedInts extends LuceneTestCase { float[] ratioOptions = new float[]{PackedInts.DEFAULT, PackedInts.COMPACT, PackedInts.FAST}; for (int bpv : new int[]{0, 1, 63, 64, RandomInts.randomIntBetween(random(), 2, 62)}) { for (DataType dataType : DataType.values()) { - final int pageSize = 1 << _TestUtil.nextInt(random(), 6, 20); - final int initialPageCount = _TestUtil.nextInt(random(), 0, 16); - float acceptableOverheadRatio = ratioOptions[_TestUtil.nextInt(random(), 0, ratioOptions.length - 1)]; + final int pageSize = 1 << TestUtil.nextInt(random(), 6, 20); + final int initialPageCount = TestUtil.nextInt(random(), 0, 16); + float acceptableOverheadRatio = ratioOptions[TestUtil.nextInt(random(), 0, ratioOptions.length - 1)]; AbstractAppendingLongBuffer buf; final int inc; switch (dataType) { @@ -980,7 +980,7 @@ public class TestPackedInts extends LuceneTestCase { break; case MONOTONIC: buf = new MonotonicAppendingLongBuffer(initialPageCount, pageSize, acceptableOverheadRatio); - inc 
= _TestUtil.nextInt(random(), -1000, 1000); + inc = TestUtil.nextInt(random(), -1000, 1000); break; default: throw new RuntimeException("added a type and forgot to add it here?"); @@ -997,7 +997,7 @@ public class TestPackedInts extends LuceneTestCase { arr[i] = random().nextLong(); } } else { - final long minValue = _TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE - PackedInts.maxValue(bpv)); + final long minValue = TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE - PackedInts.maxValue(bpv)); for (int i = 0; i < arr.length; ++i) { arr[i] = minValue + inc * i + random().nextLong() & PackedInts.maxValue(bpv); // _TestUtil.nextLong is too slow } @@ -1031,7 +1031,7 @@ public class TestPackedInts extends LuceneTestCase { long[] target = new long[arr.length + 1024]; // check the request for more is OK. - for (int i = 0; i < arr.length; i += _TestUtil.nextInt(random(), 0, 10000)) { + for (int i = 0; i < arr.length; i += TestUtil.nextInt(random(), 0, 10000)) { int lenToRead = random().nextInt(buf.pageSize() * 2) + 1; lenToRead = Math.min(lenToRead, target.length - i); int lenToCheck = Math.min(lenToRead, arr.length - i); @@ -1062,11 +1062,11 @@ public class TestPackedInts extends LuceneTestCase { final boolean[] skip = new boolean[longs.length]; for (int i = 0; i < longs.length; ++i) { final int bpv = RandomInts.randomIntBetween(random(), 1, 64); - bitsPerValues[i] = random().nextBoolean() ? bpv : _TestUtil.nextInt(random(), bpv, 64); + bitsPerValues[i] = random().nextBoolean() ? 
bpv : TestUtil.nextInt(random(), bpv, 64); if (bpv == 64) { longs[i] = random().nextLong(); } else { - longs[i] = _TestUtil.nextLong(random(), 0, PackedInts.maxValue(bpv)); + longs[i] = TestUtil.nextLong(random(), 0, PackedInts.maxValue(bpv)); } skip[i] = rarely(); } @@ -1102,7 +1102,7 @@ public class TestPackedInts extends LuceneTestCase { public void testBlockPackedReaderWriter() throws IOException { final int iters = atLeast(2); for (int iter = 0; iter < iters; ++iter) { - final int blockSize = 1 << _TestUtil.nextInt(random(), 6, 18); + final int blockSize = 1 << TestUtil.nextInt(random(), 6, 18); final int valueCount = random().nextInt(1 << 18); final long[] values = new long[valueCount]; long minValue = 0; @@ -1117,7 +1117,7 @@ public class TestPackedInts extends LuceneTestCase { } else if (bpv == 64) { values[i] = random().nextLong(); } else { - values[i] = minValue + _TestUtil.nextLong(random(), 0, (1L << bpv) - 1); + values[i] = minValue + TestUtil.nextLong(random(), 0, (1L << bpv) - 1); } } @@ -1146,7 +1146,7 @@ public class TestPackedInts extends LuceneTestCase { assertEquals("" + i, values[i], it.next()); ++i; } else { - final LongsRef nextValues = it.next(_TestUtil.nextInt(random(), 1, 1024)); + final LongsRef nextValues = it.next(TestUtil.nextInt(random(), 1, 1024)); for (int j = 0; j < nextValues.length; ++j) { assertEquals("" + (i + j), values[i + j], nextValues.longs[nextValues.offset + j]); } @@ -1170,7 +1170,7 @@ public class TestPackedInts extends LuceneTestCase { final BlockPackedReaderIterator it2 = new BlockPackedReaderIterator(in, PackedInts.VERSION_CURRENT, blockSize, valueCount); int i = 0; while (true) { - final int skip = _TestUtil.nextInt(random(), 0, valueCount - i); + final int skip = TestUtil.nextInt(random(), 0, valueCount - i); it2.skip(skip); i += skip; assertEquals(i, it2.ord()); @@ -1203,7 +1203,7 @@ public class TestPackedInts extends LuceneTestCase { public void testMonotonicBlockPackedReaderWriter() throws IOException { final 
int iters = atLeast(2); for (int iter = 0; iter < iters; ++iter) { - final int blockSize = 1 << _TestUtil.nextInt(random(), 6, 18); + final int blockSize = 1 << TestUtil.nextInt(random(), 6, 18); final int valueCount = random().nextInt(1 << 18); final long[] values = new long[valueCount]; if (valueCount > 0) { @@ -1213,7 +1213,7 @@ public class TestPackedInts extends LuceneTestCase { if (random().nextDouble() < 0.1d) { maxDelta = random().nextInt(64); } - values[i] = Math.max(0, values[i-1] + _TestUtil.nextInt(random(), -16, maxDelta)); + values[i] = Math.max(0, values[i-1] + TestUtil.nextInt(random(), -16, maxDelta)); } } @@ -1243,13 +1243,13 @@ public class TestPackedInts extends LuceneTestCase { @Nightly public void testBlockReaderOverflow() throws IOException { - final long valueCount = _TestUtil.nextLong(random(), 1L + Integer.MAX_VALUE, (long) Integer.MAX_VALUE * 2); - final int blockSize = 1 << _TestUtil.nextInt(random(), 20, 22); + final long valueCount = TestUtil.nextLong(random(), 1L + Integer.MAX_VALUE, (long) Integer.MAX_VALUE * 2); + final int blockSize = 1 << TestUtil.nextInt(random(), 20, 22); final Directory dir = newDirectory(); final IndexOutput out = dir.createOutput("out.bin", IOContext.DEFAULT); final BlockPackedWriter writer = new BlockPackedWriter(out, blockSize); long value = random().nextInt() & 0xFFFFFFFFL; - long valueOffset = _TestUtil.nextLong(random(), 0, valueCount - 1); + long valueOffset = TestUtil.nextLong(random(), 0, valueCount - 1); for (long i = 0; i < valueCount; ) { assertEquals(i, writer.ord()); if ((i & (blockSize - 1)) == 0 && (i + blockSize < valueOffset || i > valueOffset && i + blockSize < valueCount)) { @@ -1273,7 +1273,7 @@ public class TestPackedInts extends LuceneTestCase { final BlockPackedReader reader = new BlockPackedReader(in, PackedInts.VERSION_CURRENT, blockSize, valueCount, random().nextBoolean()); assertEquals(value, reader.get(valueOffset)); for (int i = 0; i < 5; ++i) { - final long offset = 
_TestUtil.nextLong(random(), 0, valueCount - 1); + final long offset = TestUtil.nextLong(random(), 0, valueCount - 1); if (offset == valueOffset) { assertEquals(value, reader.get(offset)); } else { diff --git a/lucene/demo/src/test/org/apache/lucene/demo/TestDemo.java b/lucene/demo/src/test/org/apache/lucene/demo/TestDemo.java index afc445cd3a4..d3b91c3af50 100644 --- a/lucene/demo/src/test/org/apache/lucene/demo/TestDemo.java +++ b/lucene/demo/src/test/org/apache/lucene/demo/TestDemo.java @@ -23,7 +23,7 @@ import java.io.PrintStream; import java.nio.charset.Charset; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestDemo extends LuceneTestCase { @@ -44,7 +44,7 @@ public class TestDemo extends LuceneTestCase { public void testIndexSearch() throws Exception { File dir = getDataFile("test-files/docs"); - File indexDir = _TestUtil.getTempDir("ContribDemoTest"); + File indexDir = TestUtil.getTempDir("ContribDemoTest"); IndexFiles.main(new String[] { "-create", "-docs", dir.getPath(), "-index", indexDir.getPath()}); testOneSearch(indexDir, "apache", 3); testOneSearch(indexDir, "patent", 8); diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java index 0617577c40c..f98f40d510a 100644 --- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java +++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java @@ -47,7 +47,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.English; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Tests some basic expressions against different queries, @@ -63,7 +63,7 @@ public class TestExpressionSorts extends LuceneTestCase 
{ super.setUp(); dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir); - int numDocs = _TestUtil.nextInt(random(), 2049, 4000); + int numDocs = TestUtil.nextInt(random(), 2049, 4000); for (int i = 0; i < numDocs; i++) { Document document = new Document(); document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO)); @@ -124,13 +124,13 @@ public class TestExpressionSorts extends LuceneTestCase { new SortField("score", SortField.Type.SCORE) }; Collections.shuffle(Arrays.asList(fields), random()); - int numSorts = _TestUtil.nextInt(random(), 1, fields.length); + int numSorts = TestUtil.nextInt(random(), 1, fields.length); assertQuery(query, filter, new Sort(Arrays.copyOfRange(fields, 0, numSorts))); } } void assertQuery(Query query, Filter filter, Sort sort) throws Exception { - int size = _TestUtil.nextInt(random(), 1, searcher.getIndexReader().maxDoc()/5); + int size = TestUtil.nextInt(random(), 1, searcher.getIndexReader().maxDoc() / 5); TopDocs expected = searcher.search(query, filter, size, sort, random().nextBoolean(), random().nextBoolean()); // make our actual sort, mutating original by replacing some of the diff --git a/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java b/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java index 0ded9544a41..80849a94d89 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java @@ -34,7 +34,7 @@ import org.apache.lucene.facet.taxonomy.TaxonomyFacetCounts; import org.apache.lucene.facet.taxonomy.TaxonomyReader; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public abstract class FacetTestCase extends LuceneTestCase { @@ -60,7 +60,7 @@ public abstract class FacetTestCase extends LuceneTestCase { protected String[] getRandomTokens(int count) { 
String[] tokens = new String[count]; for(int i=0;i values = new HashSet(); while (values.size() < valueCount) { - String s = _TestUtil.randomRealisticUnicodeString(random()); + String s = TestUtil.randomRealisticUnicodeString(random()); //String s = _TestUtil.randomString(random()); if (s.length() > 0) { values.add(s); @@ -523,7 +523,7 @@ public class TestDrillSideways extends FacetTestCase { if (random().nextBoolean()) { // Randomly delete a few docs: - int numDel = _TestUtil.nextInt(random(), 1, (int) (numDocs*0.05)); + int numDel = TestUtil.nextInt(random(), 1, (int) (numDocs * 0.05)); if (VERBOSE) { System.out.println("delete " + numDel); } @@ -570,7 +570,7 @@ public class TestDrillSideways extends FacetTestCase { for(int iter=0;iter 0) { break; } diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java index 8fcaec8ac97..0dc324be568 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java @@ -67,7 +67,7 @@ import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestRangeFacetCounts extends FacetTestCase { @@ -401,7 +401,7 @@ public class TestRangeFacetCounts extends FacetTestCase { if (VERBOSE) { System.out.println("TEST: iter=" + iter); } - int numRange = _TestUtil.nextInt(random(), 1, 100); + int numRange = TestUtil.nextInt(random(), 1, 100); LongRange[] ranges = new LongRange[numRange]; int[] expectedCounts = new int[numRange]; long minAcceptedValue = Long.MAX_VALUE; @@ -545,7 +545,7 @@ public class TestRangeFacetCounts extends FacetTestCase { if (VERBOSE) { System.out.println("TEST: iter=" + iter); } - int numRange = 
_TestUtil.nextInt(random(), 1, 5); + int numRange = TestUtil.nextInt(random(), 1, 5); DoubleRange[] ranges = new DoubleRange[numRange]; int[] expectedCounts = new int[numRange]; float minAcceptedValue = Float.POSITIVE_INFINITY; @@ -703,7 +703,7 @@ public class TestRangeFacetCounts extends FacetTestCase { if (VERBOSE) { System.out.println("TEST: iter=" + iter); } - int numRange = _TestUtil.nextInt(random(), 1, 5); + int numRange = TestUtil.nextInt(random(), 1, 5); DoubleRange[] ranges = new DoubleRange[numRange]; int[] expectedCounts = new int[numRange]; double minAcceptedValue = Double.POSITIVE_INFINITY; diff --git a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java index bccd329a567..4210ba7253c 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java @@ -41,7 +41,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestSortedSetDocValuesFacets extends FacetTestCase { @@ -278,7 +278,7 @@ public class TestSortedSetDocValuesFacets extends FacetTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), indexDir); FacetsConfig config = new FacetsConfig(); int numDocs = atLeast(1000); - int numDims = _TestUtil.nextInt(random(), 1, 7); + int numDims = TestUtil.nextInt(random(), 1, 7); List testDocs = getRandomDocs(tokens, numDocs, numDims); for(TestDoc testDoc : testDocs) { Document doc = new Document(); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java index 
e0200086e03..3126e0636ad 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java @@ -6,7 +6,7 @@ import org.apache.lucene.facet.FacetField; import org.apache.lucene.facet.FacetTestCase; import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; /* @@ -267,7 +267,7 @@ public class TestFacetLabel extends FacetTestCase { String bigComp = null; while (true) { int len = FacetLabel.MAX_CATEGORY_PATH_LENGTH; - bigComp = _TestUtil.randomSimpleString(random(), len, len); + bigComp = TestUtil.randomSimpleString(random(), len, len); if (bigComp.indexOf('\u001f') != -1) { continue; } diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java index 54bcd891e17..27af83a0bb0 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java @@ -32,7 +32,6 @@ import org.apache.lucene.facet.FacetTestCase; import org.apache.lucene.facet.Facets; import org.apache.lucene.facet.FacetsCollector; import org.apache.lucene.facet.FacetsConfig; -import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager; import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager.SearcherAndTaxonomy; import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter; import org.apache.lucene.index.IndexWriter; @@ -40,7 +39,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import 
org.apache.lucene.util.TestUtil; public class TestSearcherTaxonomyManager extends FacetTestCase { @@ -70,7 +69,7 @@ public class TestSearcherTaxonomyManager extends FacetTestCase { List paths = new ArrayList(); while (true) { Document doc = new Document(); - int numPaths = _TestUtil.nextInt(random(), 1, 5); + int numPaths = TestUtil.nextInt(random(), 1, 5); for(int i=0;i 32 KB for one // document, we don't hit exc when using Facet42DocValuesFormat public void testManyFacetsInOneDocument() throws Exception { - assumeTrue("default Codec doesn't support huge BinaryDocValues", _TestUtil.fieldSupportsHugeBinaryDocValues(FacetsConfig.DEFAULT_INDEX_FIELD_NAME)); + assumeTrue("default Codec doesn't support huge BinaryDocValues", TestUtil.fieldSupportsHugeBinaryDocValues(FacetsConfig.DEFAULT_INDEX_FIELD_NAME)); Directory dir = newDirectory(); Directory taxoDir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); @@ -423,7 +423,7 @@ public class TestTaxonomyFacetCounts extends FacetTestCase { FacetsConfig config = new FacetsConfig(); config.setMultiValued("dim", true); - int numLabels = _TestUtil.nextInt(random(), 40000, 100000); + int numLabels = TestUtil.nextInt(random(), 40000, 100000); Document doc = new Document(); doc.add(newTextField("field", "text", Field.Store.NO)); @@ -678,7 +678,7 @@ public class TestTaxonomyFacetCounts extends FacetTestCase { DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir); FacetsConfig config = new FacetsConfig(); int numDocs = atLeast(1000); - int numDims = _TestUtil.nextInt(random(), 1, 7); + int numDims = TestUtil.nextInt(random(), 1, 7); List testDocs = getRandomDocs(tokens, numDocs, numDims); for(TestDoc testDoc : testDocs) { Document doc = new Document(); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java index 
5561e6da188..6735905eadf 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java @@ -61,7 +61,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestTaxonomyFacetSumValueSource extends FacetTestCase { @@ -428,7 +428,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase { DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir); FacetsConfig config = new FacetsConfig(); int numDocs = atLeast(1000); - int numDims = _TestUtil.nextInt(random(), 1, 7); + int numDims = TestUtil.nextInt(random(), 1, 7); List testDocs = getRandomDocs(tokens, numDocs, numDims); for(TestDoc testDoc : testDocs) { Document doc = new Document(); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java index c78b8d36ea6..f61e27338c9 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java @@ -12,7 +12,7 @@ import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.Memory import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.OrdinalMap; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -74,7 +74,7 @@ public class TestAddTaxonomy extends FacetTestCase { private OrdinalMap randomOrdinalMap() throws IOException { if 
(random().nextBoolean()) { - return new DiskOrdinalMap(_TestUtil.createTempFile("taxoMap", "", TEMP_DIR)); + return new DiskOrdinalMap(TestUtil.createTempFile("taxoMap", "", TEMP_DIR)); } else { return new MemoryOrdinalMap(); } @@ -160,8 +160,8 @@ public class TestAddTaxonomy extends FacetTestCase { Random random = random(); int numTests = atLeast(3); for (int i = 0; i < numTests; i++) { - dotest(_TestUtil.nextInt(random, 2, 100), - _TestUtil.nextInt(random, 100, 1000)); + dotest(TestUtil.nextInt(random, 2, 100), + TestUtil.nextInt(random, 100, 1000)); } } diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java index f00935ca9bb..a676b280112 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java @@ -29,7 +29,8 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; /* @@ -445,7 +446,7 @@ public class TestDirectoryTaxonomyWriter extends FacetTestCase { int ordinal = -1; int len = FacetLabel.MAX_CATEGORY_PATH_LENGTH - 4; // for the dimension and separator - bigs = _TestUtil.randomSimpleString(random(), len, len); + bigs = TestUtil.randomSimpleString(random(), len, len); FacetField ff = new FacetField("dim", bigs); FacetLabel cp = new FacetLabel("dim", bigs); ordinal = taxoWriter.addCategory(cp); @@ -455,7 +456,7 @@ public class TestDirectoryTaxonomyWriter extends FacetTestCase { // Add tiny ones to cause a re-hash for (int i = 0; i < 3; i++) { - String s = 
_TestUtil.randomSimpleString(random(), 1, 10); + String s = TestUtil.randomSimpleString(random(), 1, 10); taxoWriter.addCategory(new FacetLabel("dim", s)); doc = new Document(); doc.add(new FacetField("dim", s)); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCharBlockArray.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCharBlockArray.java index 3f5ec18ab86..799df78cf23 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCharBlockArray.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCharBlockArray.java @@ -11,7 +11,8 @@ import java.nio.charset.CodingErrorAction; import org.apache.lucene.facet.FacetTestCase; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; /* @@ -84,7 +85,7 @@ public class TestCharBlockArray extends FacetTestCase { assertEqualsInternal("GrowingCharArray<->StringBuilder mismatch.", builder, array); - File tempDir = _TestUtil.getTempDir("growingchararray"); + File tempDir = TestUtil.getTempDir("growingchararray"); File f = new File(tempDir, "GrowingCharArrayTest.tmp"); BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(f)); array.flush(out); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java index f9f485bd2ea..715a80e7c74 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java @@ -11,7 +11,8 @@ import java.util.Random; import org.apache.lucene.facet.FacetTestCase; import org.apache.lucene.facet.taxonomy.FacetLabel; import 
org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; /* @@ -67,7 +68,7 @@ public class TestCompactLabelToOrdinal extends FacetTestCase { } } - File tmpDir = _TestUtil.getTempDir("testLableToOrdinal"); + File tmpDir = TestUtil.getTempDir("testLableToOrdinal"); File f = new File(tmpDir, "CompactLabelToOrdinalTest.tmp"); int flushInterval = 10; diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java index 843e017f4cd..b8e3a7ffe58 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AbstractGroupingTestCase.java @@ -18,7 +18,7 @@ package org.apache.lucene.search.grouping; */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Base class for grouping related tests. @@ -32,7 +32,7 @@ public abstract class AbstractGroupingTestCase extends LuceneTestCase { // B/c of DV based impl we can't see the difference between an empty string and a null value. // For that reason we don't generate empty string // groups. 
- randomValue = _TestUtil.randomRealisticUnicodeString(random()); + randomValue = TestUtil.randomRealisticUnicodeString(random()); //randomValue = _TestUtil.randomSimpleString(random()); } while ("".equals(randomValue)); return randomValue; diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java index d5eeaf62450..6d6dacdebce 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java @@ -46,7 +46,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import java.io.IOException; import java.util.ArrayList; @@ -178,14 +178,14 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { } public void testRandom() throws Exception { - int numberOfRuns = _TestUtil.nextInt(random(), 3, 6); + int numberOfRuns = TestUtil.nextInt(random(), 3, 6); for (int iter = 0; iter < numberOfRuns; iter++) { if (VERBOSE) { System.out.println(String.format(Locale.ROOT, "TEST: iter=%d total=%d", iter, numberOfRuns)); } - final int numDocs = _TestUtil.nextInt(random(), 100, 1000) * RANDOM_MULTIPLIER; - final int numGroups = _TestUtil.nextInt(random(), 1, numDocs); + final int numDocs = TestUtil.nextInt(random(), 100, 1000) * RANDOM_MULTIPLIER; + final int numGroups = TestUtil.nextInt(random(), 1, numDocs); if (VERBOSE) { System.out.println("TEST: numDocs=" + numDocs + " numGroups=" + numGroups); @@ -197,11 +197,11 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { do { // B/c of DV based impl we can't see the difference between an empty string and a null value. 
// For that reason we don't generate empty string groups. - randomValue = _TestUtil.randomRealisticUnicodeString(random()); + randomValue = TestUtil.randomRealisticUnicodeString(random()); } while ("".equals(randomValue)); groups.add(new BytesRef(randomValue)); } - final String[] contentStrings = new String[_TestUtil.nextInt(random(), 2, 20)]; + final String[] contentStrings = new String[TestUtil.nextInt(random(), 2, 20)]; if (VERBOSE) { System.out.println("TEST: create fake content"); } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java index 1218193777c..bcdcd1ab2e8 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java @@ -41,7 +41,7 @@ import org.apache.lucene.search.grouping.term.TermDistinctValuesCollector; import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.mutable.MutableValue; import org.apache.lucene.util.mutable.MutableValueStr; @@ -246,7 +246,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase { public void testRandom() throws Exception { Random random = random(); - int numberOfRuns = _TestUtil.nextInt(random, 3, 6); + int numberOfRuns = TestUtil.nextInt(random, 3, 6); for (int indexIter = 0; indexIter < numberOfRuns; indexIter++) { IndexContext context = createIndexContext(); for (int searchIter = 0; searchIter < 100; searchIter++) { diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java 
b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java index 9f89bf3e211..a6d43fca71c 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java @@ -33,7 +33,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.grouping.term.TermGroupFacetCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import java.io.IOException; import java.util.ArrayList; @@ -372,7 +372,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase { public void testRandom() throws Exception { Random random = random(); - int numberOfRuns = _TestUtil.nextInt(random, 3, 6); + int numberOfRuns = TestUtil.nextInt(random, 3, 6); for (int indexIter = 0; indexIter < numberOfRuns; indexIter++) { boolean multipleFacetsPerDocument = random.nextBoolean(); IndexContext context = createIndexContext(multipleFacetsPerDocument); @@ -478,9 +478,9 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase { private IndexContext createIndexContext(boolean multipleFacetValuesPerDocument) throws IOException { final Random random = random(); - final int numDocs = _TestUtil.nextInt(random, 138, 1145) * RANDOM_MULTIPLIER; - final int numGroups = _TestUtil.nextInt(random, 1, numDocs / 4); - final int numFacets = _TestUtil.nextInt(random, 1, numDocs / 6); + final int numDocs = TestUtil.nextInt(random, 138, 1145) * RANDOM_MULTIPLIER; + final int numGroups = TestUtil.nextInt(random, 1, numDocs / 4); + final int numFacets = TestUtil.nextInt(random, 1, numDocs / 6); if (VERBOSE) { System.out.println("TEST: numDocs=" + numDocs + " numGroups=" + numGroups); @@ -494,7 +494,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase { for (int i = 0; i < numFacets; i++) 
{ facetValues.add(generateRandomNonEmptyString()); } - final String[] contentBrs = new String[_TestUtil.nextInt(random, 2, 20)]; + final String[] contentBrs = new String[TestUtil.nextInt(random, 2, 20)]; if (VERBOSE) { System.out.println("TEST: create fake content"); } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index 2ccc59069d7..e740e678318 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -39,7 +39,7 @@ import org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.mutable.MutableValue; import org.apache.lucene.util.mutable.MutableValueStr; @@ -619,16 +619,16 @@ public class TestGrouping extends LuceneTestCase { } public void testRandom() throws Exception { - int numberOfRuns = _TestUtil.nextInt(random(), 3, 6); + int numberOfRuns = TestUtil.nextInt(random(), 3, 6); for (int iter=0; iter 0) { sb.append(' '); - sb.append((char)_TestUtil.nextInt(r, 'a', 'z')); + sb.append((char) TestUtil.nextInt(r, 'a', 'z')); } else { // capitalize the first word to help breakiterator - sb.append((char)_TestUtil.nextInt(r, 'A', 'Z')); + sb.append((char) TestUtil.nextInt(r, 'A', 'Z')); } } sb.append(". 
"); // finalize sentence diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java index afab821da31..e0f5ad10598 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java @@ -23,7 +23,8 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo.Toffs; import org.apache.lucene.search.vectorhighlight.FieldTermStack.TermInfo; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; public class FieldPhraseListTest extends AbstractTestCase { @@ -269,7 +270,7 @@ public class FieldPhraseListTest extends AbstractTestCase { private WeightedPhraseInfo newInfo( int startOffset, int endOffset, float boost ) { LinkedList< TermInfo > infos = new LinkedList< TermInfo >(); - infos.add( new TermInfo( _TestUtil.randomUnicodeString( random() ), startOffset, endOffset, 0, 0 ) ); + infos.add( new TermInfo( TestUtil.randomUnicodeString(random()), startOffset, endOffset, 0, 0 ) ); return new WeightedPhraseInfo( infos, boost ); } diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldTermStackTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldTermStackTest.java index 2a4c6739e59..6eef20f63ec 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldTermStackTest.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldTermStackTest.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.BooleanQuery; import 
org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.search.vectorhighlight.FieldTermStack.TermInfo; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class FieldTermStackTest extends AbstractTestCase { @@ -176,10 +176,10 @@ public class FieldTermStackTest extends AbstractTestCase { } public void testTermInfoComparisonConsistency() { - TermInfo a = new TermInfo( _TestUtil.randomUnicodeString( random() ), 0, 0, 0, 1 ); - TermInfo b = new TermInfo( _TestUtil.randomUnicodeString( random() ), 0, 0, 1, 1 ); - TermInfo c = new TermInfo( _TestUtil.randomUnicodeString( random() ), 0, 0, 2, 1 ); - TermInfo d = new TermInfo( _TestUtil.randomUnicodeString( random() ), 0, 0, 0, 1 ); + TermInfo a = new TermInfo( TestUtil.randomUnicodeString(random()), 0, 0, 0, 1 ); + TermInfo b = new TermInfo( TestUtil.randomUnicodeString(random()), 0, 0, 1, 1 ); + TermInfo c = new TermInfo( TestUtil.randomUnicodeString(random()), 0, 0, 2, 1 ); + TermInfo d = new TermInfo( TestUtil.randomUnicodeString(random()), 0, 0, 0, 1 ); assertConsistentEquals( a, a ); assertConsistentEquals( b, b ); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java index 3dbd12b6613..e15b32a377e 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java @@ -35,7 +35,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.highlight.SimpleHTMLEncoder; import org.apache.lucene.store.Directory; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import java.util.ArrayList; import java.util.HashMap; @@ -228,7 
+228,7 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase { for (int i = 0; i < randomValues.length; i++) { String randomValue; do { - randomValue = _TestUtil.randomSimpleString(random()); + randomValue = TestUtil.randomSimpleString(random()); } while ("".equals(randomValue)); randomValues[i] = randomValue; } diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java index b788ea0f2bc..c7ddcdd4da0 100644 --- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java +++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java @@ -439,18 +439,18 @@ public class TestBlockJoin extends LuceneTestCase { private String[][] getRandomFields(int maxUniqueValues) { - final String[][] fields = new String[_TestUtil.nextInt(random(), 2, 4)][]; + final String[][] fields = new String[TestUtil.nextInt(random(), 2, 4)][]; for(int fieldID=0;fieldID joinResults = c.getTopGroups(childJoinQuery, childSort, 0, hitsPerGroup, 0, true); @@ -822,7 +822,7 @@ public class TestBlockJoin extends LuceneTestCase { } else if (random().nextInt(3) == 2) { BooleanQuery bq = new BooleanQuery(); parentQuery2 = bq; - final int numClauses = _TestUtil.nextInt(random(), 2, 4); + final int numClauses = TestUtil.nextInt(random(), 2, 4); boolean didMust = false; for(int clauseIDX=0;clauseIDX randomTerms = new HashSet(); while (randomTerms.size() < numTerms) { - randomTerms.add(_TestUtil.randomSimpleString(random())); + randomTerms.add(TestUtil.randomSimpleString(random())); } terms = new ArrayList(randomTerms); final long seed = random().nextLong(); diff --git a/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java b/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java index 4faef8feb3f..3ff868cacfc 100644 --- a/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java +++ 
b/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java @@ -28,7 +28,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -46,7 +46,7 @@ public class TestHighFreqTerms extends LuceneTestCase { .setMaxBufferedDocs(2)); indexDocs(writer); reader = DirectoryReader.open(dir); - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); } @AfterClass diff --git a/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java b/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java index ec395250bc1..caa5b5f9e6d 100644 --- a/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java +++ b/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java @@ -29,7 +29,8 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.fst.UpToTwoPositiveIntOutputs.TwoLongs; import static org.apache.lucene.util.fst.FSTTester.getRandomString; @@ -91,15 +92,15 @@ public class TestFSTsMisc extends LuceneTestCase { long lastOutput = 0; for(int idx=0;idx values = new ArrayList(); values.add(value); @@ -137,13 +138,13 @@ public class TestFSTsMisc extends LuceneTestCase { long lastOutput = 0; for(int idx=0;idx values = new ArrayList(); for(int i=0;i terms = new ArrayList(); for (int i = 0; i < num; i++) { String field = "field" + i; - String string = _TestUtil.randomRealisticUnicodeString(random()); + String string = TestUtil.randomRealisticUnicodeString(random()); terms.add(new Term(field, string)); Document doc = new 
Document(); doc.add(newStringField(field, string, Field.Store.NO)); @@ -122,7 +122,7 @@ public class TermFilterTest extends LuceneTestCase { for (int i = 0; i < num; i++) { String field1 = "field" + i; String field2 = "field" + i + num; - String value1 = _TestUtil.randomRealisticUnicodeString(random()); + String value1 = TestUtil.randomRealisticUnicodeString(random()); String value2 = value1 + "x"; // this must be not equal to value1 TermFilter filter1 = termFilter(field1, value1); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java b/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java index c0c161e47c8..2a071180639 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java @@ -49,7 +49,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; public class TermsFilterTest extends LuceneTestCase { @@ -213,7 +214,7 @@ public class TermsFilterTest extends LuceneTestCase { List terms = new ArrayList(); for (int i = 0; i < num; i++) { String field = "field" + (singleField ? "1" : random().nextInt(100)); - String string = _TestUtil.randomRealisticUnicodeString(random()); + String string = TestUtil.randomRealisticUnicodeString(random()); terms.add(new Term(field, string)); Document doc = new Document(); doc.add(newStringField(field, string, Field.Store.YES)); @@ -279,7 +280,7 @@ public class TermsFilterTest extends LuceneTestCase { Set uniqueTerms = new HashSet(); for (int i = 0; i < num; i++) { String field = "field" + (singleField ? 
"1" : random().nextInt(100)); - String string = _TestUtil.randomRealisticUnicodeString(random()); + String string = TestUtil.randomRealisticUnicodeString(random()); terms.add(new Term(field, string)); uniqueTerms.add(new Term(field, string)); TermsFilter left = termsFilter(singleField ? random().nextBoolean() : false, uniqueTerms); diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java index 4f3c0126458..76c64b8d62c 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.FieldCache; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.Ignore; @@ -113,7 +113,7 @@ public abstract class FunctionTestSetup extends LuceneTestCase { anlzr = new MockAnalyzer(random()); IndexWriterConfig iwc = newIndexWriterConfig( TEST_VERSION_CURRENT, anlzr).setMergePolicy(newLogMergePolicy()); if (doMultiSegment) { - iwc.setMaxBufferedDocs(_TestUtil.nextInt(random(), 2, 7)); + iwc.setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 7)); } RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); // add docs not exactly in natural ID order, to verify we do check the order of docs by scores diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java index 3d2a1a2e066..95bc62a6911 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java +++ 
b/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java @@ -35,7 +35,8 @@ import org.apache.lucene.queries.function.valuesource.LongFieldSource; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.packed.PackedInts; import com.carrotsearch.randomizedtesting.generators.RandomInts; @@ -75,7 +76,7 @@ public class TestDocValuesFieldSources extends LuceneTestCase { case SORTED: case BINARY: do { - vals[i] = _TestUtil.randomSimpleString(random(), 20); + vals[i] = TestUtil.randomSimpleString(random(), 20); } while (((String) vals[i]).isEmpty()); f.setBytesValue(new BytesRef((String) vals[i])); break; diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java index 0a9ba028ca4..a209ce22b46 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java @@ -52,7 +52,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -190,7 +190,7 @@ public class TestNumericQueryParser extends LuceneTestCase { directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) - .setMaxBufferedDocs(_TestUtil.nextInt(random(), 
50, 1000)) + .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)) .setMergePolicy(newLogMergePolicy())); Document doc = new Document(); diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java index c852a069296..7b55a53d467 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java +++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/simple/TestSimpleQueryParser.java @@ -34,7 +34,8 @@ import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.LevenshteinAutomata; import static org.apache.lucene.queryparser.simple.SimpleQueryParser.AND_OPERATOR; @@ -623,9 +624,9 @@ public class TestSimpleQueryParser extends LuceneTestCase { // we aren't supposed to barf on any input... 
public void testRandomQueries() throws Exception { for (int i = 0; i < 1000; i++) { - String query = _TestUtil.randomUnicodeString(random()); + String query = TestUtil.randomUnicodeString(random()); parse(query); // no exception - parseKeyword(query, _TestUtil.nextInt(random(), 0, 1024)); // no exception + parseKeyword(query, TestUtil.nextInt(random(), 0, 1024)); // no exception } } @@ -639,7 +640,7 @@ public class TestSimpleQueryParser extends LuceneTestCase { sb.append(chars[random().nextInt(chars.length)]); } parse(sb.toString()); // no exception - parseKeyword(sb.toString(), _TestUtil.nextInt(random(), 0, 1024)); // no exception + parseKeyword(sb.toString(), TestUtil.nextInt(random(), 0, 1024)); // no exception } } } \ No newline at end of file diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java index c5c4f93b4aa..0462ce01f93 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java @@ -48,8 +48,8 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.ThreadInterruptedException; -import org.apache.lucene.util._TestUtil; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -91,7 +91,7 @@ public class IndexAndTaxonomyReplicationClientTest extends ReplicatorTestCase { indexReader.close(); indexReader = newReader; lastIndexGeneration = newGeneration; - _TestUtil.checkIndex(indexDir); + TestUtil.checkIndex(indexDir); // verify taxonomy index DirectoryTaxonomyReader newTaxoReader = TaxonomyReader.openIfChanged(taxoReader); @@ -99,7 +99,7 @@ 
public class IndexAndTaxonomyReplicationClientTest extends ReplicatorTestCase { taxoReader.close(); taxoReader = newTaxoReader; } - _TestUtil.checkIndex(taxoDir); + TestUtil.checkIndex(taxoDir); // verify faceted search int id = Integer.parseInt(indexReader.getIndexCommit().getUserData().get(VERSION_ID), 16); @@ -191,7 +191,7 @@ public class IndexAndTaxonomyReplicationClientTest extends ReplicatorTestCase { publishTaxoDir = newDirectory(); handlerIndexDir = newMockDirectory(); handlerTaxoDir = newMockDirectory(); - clientWorkDir = _TestUtil.getTempDir("replicationClientTest"); + clientWorkDir = TestUtil.getTempDir("replicationClientTest"); sourceDirFactory = new PerSessionDirectoryFactory(clientWorkDir); replicator = new LocalReplicator(); callback = new IndexAndTaxonomyReadyCallback(handlerIndexDir, handlerTaxoDir); @@ -399,11 +399,11 @@ public class IndexAndTaxonomyReplicationClientTest extends ReplicatorTestCase { reader.close(); } // verify index is fully consistent - _TestUtil.checkIndex(handlerIndexDir.getDelegate()); + TestUtil.checkIndex(handlerIndexDir.getDelegate()); // verify taxonomy index is fully consistent (since we only add one // category to all documents, there's nothing much more to validate - _TestUtil.checkIndex(handlerTaxoDir.getDelegate()); + TestUtil.checkIndex(handlerTaxoDir.getDelegate()); } catch (IOException e) { throw new RuntimeException(e); } finally { diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java index 0df349005bd..76e92e8e0d8 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java @@ -33,8 +33,8 @@ import org.apache.lucene.replicator.ReplicationClient.SourceDirectoryFactory; import org.apache.lucene.store.Directory; import 
org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.ThreadInterruptedException; -import org.apache.lucene.util._TestUtil; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -68,7 +68,7 @@ public class IndexReplicationClientTest extends ReplicatorTestCase { reader.close(); reader = newReader; lastGeneration = newGeneration; - _TestUtil.checkIndex(indexDir); + TestUtil.checkIndex(indexDir); } return null; } @@ -136,7 +136,7 @@ public class IndexReplicationClientTest extends ReplicatorTestCase { super.setUp(); publishDir = newMockDirectory(); handlerDir = newMockDirectory(); - sourceDirFactory = new PerSessionDirectoryFactory(_TestUtil.getTempDir("replicationClientTest")); + sourceDirFactory = new PerSessionDirectoryFactory(TestUtil.getTempDir("replicationClientTest")); replicator = new LocalReplicator(); callback = new IndexReadyCallback(handlerDir); handler = new IndexReplicationHandler(handlerDir, callback); @@ -305,7 +305,7 @@ public class IndexReplicationClientTest extends ReplicatorTestCase { reader.close(); } // verify index consistency - _TestUtil.checkIndex(handlerDir.getDelegate()); + TestUtil.checkIndex(handlerDir.getDelegate()); } catch (IOException e) { // exceptions here are bad, don't ignore them throw new RuntimeException(e); diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java index 46b59424fb0..b549f7f6e2b 100644 --- a/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java +++ b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java @@ -35,7 +35,7 @@ import org.apache.lucene.replicator.Replicator; import org.apache.lucene.replicator.ReplicatorTestCase; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; 
-import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.servlet.ServletHandler; import org.eclipse.jetty.servlet.ServletHolder; @@ -68,7 +68,7 @@ public class HttpReplicatorTest extends ReplicatorTestCase { public void setUp() throws Exception { super.setUp(); System.setProperty("org.eclipse.jetty.LEVEL", "DEBUG"); // sets stderr logging to DEBUG level - clientWorkDir = _TestUtil.getTempDir("httpReplicatorTest"); + clientWorkDir = TestUtil.getTempDir("httpReplicatorTest"); handlerIndexDir = newDirectory(); serverIndexDir = newDirectory(); serverReplicator = new LocalReplicator(); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java index 2d9040b4ba7..260e85bb7f8 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class DuplicateFilterTest extends LuceneTestCase { private static final String KEY_FIELD = "url"; @@ -134,12 +134,12 @@ public class DuplicateFilterTest extends LuceneTestCase { for (ScoreDoc hit : hits) { StoredDocument d = searcher.doc(hit.doc); String url = d.get(KEY_FIELD); - DocsEnum td = _TestUtil.docs(random(), reader, - KEY_FIELD, - new BytesRef(url), - MultiFields.getLiveDocs(reader), - null, - 0); + DocsEnum td = TestUtil.docs(random(), reader, + KEY_FIELD, + new BytesRef(url), + MultiFields.getLiveDocs(reader), + null, + 0); int lastDoc = 0; while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { @@ -158,12 +158,12 @@ public 
class DuplicateFilterTest extends LuceneTestCase { for (ScoreDoc hit : hits) { StoredDocument d = searcher.doc(hit.doc); String url = d.get(KEY_FIELD); - DocsEnum td = _TestUtil.docs(random(), reader, - KEY_FIELD, - new BytesRef(url), - MultiFields.getLiveDocs(reader), - null, - 0); + DocsEnum td = TestUtil.docs(random(), reader, + KEY_FIELD, + new BytesRef(url), + MultiFields.getLiveDocs(reader), + null, + 0); int lastDoc = 0; td.nextDoc(); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java index b5561696e51..6b485f66fd6 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java @@ -11,7 +11,7 @@ import org.apache.lucene.search.*; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -55,12 +55,12 @@ public class TestSlowCollationMethods extends LuceneTestCase { RandomIndexWriter iw = new RandomIndexWriter(random(), dir); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - String value = _TestUtil.randomUnicodeString(random()); + String value = TestUtil.randomUnicodeString(random()); Field field = newStringField("field", value, Field.Store.YES); doc.add(field); iw.addDocument(doc); } - splitDoc = _TestUtil.randomUnicodeString(random()); + splitDoc = TestUtil.randomUnicodeString(random()); reader = iw.getReader(); iw.close(); @@ -130,8 +130,8 @@ public class TestSlowCollationMethods extends LuceneTestCase { public void testRangeQuery() throws Exception { int numQueries = 50*RANDOM_MULTIPLIER; for (int i = 0; i < numQueries; i++) { - String startPoint 
= _TestUtil.randomUnicodeString(random()); - String endPoint = _TestUtil.randomUnicodeString(random()); + String startPoint = TestUtil.randomUnicodeString(random()); + String endPoint = TestUtil.randomUnicodeString(random()); Query query = new SlowCollatedTermRangeQuery("field", startPoint, endPoint, true, true, collator); doTestRanges(startPoint, endPoint, query); } @@ -140,8 +140,8 @@ public class TestSlowCollationMethods extends LuceneTestCase { public void testRangeFilter() throws Exception { int numQueries = 50*RANDOM_MULTIPLIER; for (int i = 0; i < numQueries; i++) { - String startPoint = _TestUtil.randomUnicodeString(random()); - String endPoint = _TestUtil.randomUnicodeString(random()); + String startPoint = TestUtil.randomUnicodeString(random()); + String endPoint = TestUtil.randomUnicodeString(random()); Query query = new ConstantScoreQuery(new SlowCollatedTermRangeFilter("field", startPoint, endPoint, true, true, collator)); doTestRanges(startPoint, endPoint, query); } @@ -162,7 +162,7 @@ public class TestSlowCollationMethods extends LuceneTestCase { RandomIndexWriter iw = new RandomIndexWriter(random(), dir); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - String value = _TestUtil.randomUnicodeString(random()); + String value = TestUtil.randomUnicodeString(random()); Field field = newStringField("field", value, Field.Store.YES); doc.add(field); iw.addDocument(doc); @@ -172,8 +172,8 @@ public class TestSlowCollationMethods extends LuceneTestCase { IndexSearcher searcher = newSearcher(reader); - String startPoint = _TestUtil.randomUnicodeString(random()); - String endPoint = _TestUtil.randomUnicodeString(random()); + String startPoint = TestUtil.randomUnicodeString(random()); + String endPoint = TestUtil.randomUnicodeString(random()); Query query = new SlowCollatedTermRangeQuery("field", startPoint, endPoint, true, true, collator); QueryUtils.check(random(), query, searcher); reader.close(); diff --git 
a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java index f1b7f8f625f..51e4bc6b1c3 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java @@ -34,7 +34,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.After; import org.junit.Before; @@ -72,7 +72,7 @@ public abstract class SpatialTestCase extends LuceneTestCase { final IndexWriterConfig indexWriterConfig = LuceneTestCase.newIndexWriterConfig(random, LuceneTestCase.TEST_VERSION_CURRENT, new MockAnalyzer(random)); //TODO can we randomly choose a doc-values supported format? if (needsDocValues()) - indexWriterConfig.setCodec( _TestUtil.alwaysDocValuesFormat(new Lucene45DocValuesFormat()));; + indexWriterConfig.setCodec( TestUtil.alwaysDocValuesFormat(new Lucene45DocValuesFormat()));; return indexWriterConfig; } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java index 58a8807eef0..e5dfc45d140 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java @@ -35,7 +35,7 @@ import org.apache.lucene.search.spell.WordBreakSpellChecker.BreakSuggestionSortM import org.apache.lucene.store.Directory; import org.apache.lucene.util.English; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestWordBreakSpellChecker extends LuceneTestCase { private Directory dir = 
null; @@ -262,7 +262,7 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { } } public void testRandom() throws Exception { - int numDocs = _TestUtil.nextInt(random(), (10 * RANDOM_MULTIPLIER), + int numDocs = TestUtil.nextInt(random(), (10 * RANDOM_MULTIPLIER), (100 * RANDOM_MULTIPLIER)); Directory dir = null; RandomIndexWriter writer = null; @@ -271,24 +271,24 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { dir = newDirectory(); writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); - int maxLength = _TestUtil.nextInt(random(), 5, 50); + int maxLength = TestUtil.nextInt(random(), 5, 50); List originals = new ArrayList(numDocs); List breaks = new ArrayList(numDocs); for (int i = 0; i < numDocs; i++) { String orig = ""; if (random().nextBoolean()) { while (!goodTestString(orig)) { - orig = _TestUtil.randomSimpleString(random(), maxLength); + orig = TestUtil.randomSimpleString(random(), maxLength); } } else { while (!goodTestString(orig)) { - orig = _TestUtil.randomUnicodeString(random(), maxLength); + orig = TestUtil.randomUnicodeString(random(), maxLength); } } originals.add(orig); int totalLength = orig.codePointCount(0, orig.length()); int breakAt = orig.offsetByCodePoints(0, - _TestUtil.nextInt(random(), 1, totalLength - 1)); + TestUtil.nextInt(random(), 1, totalLength - 1)); String[] broken = new String[2]; broken[0] = orig.substring(0, breakAt); broken[1] = orig.substring(breakAt); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java index 665af98d46f..ba4e675dcfa 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java @@ -10,7 +10,8 @@ import java.util.Map; import org.apache.lucene.util.BytesRef; import 
org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Test; @@ -36,18 +37,18 @@ public class FileDictionaryTest extends LuceneTestCase { private Map.Entry, String> generateFileEntry(String fieldDelimiter, boolean hasWeight, boolean hasPayload) { List entryValues = new ArrayList<>(); StringBuilder sb = new StringBuilder(); - String term = _TestUtil.randomSimpleString(random(), 1, 300); + String term = TestUtil.randomSimpleString(random(), 1, 300); sb.append(term); entryValues.add(term); if (hasWeight) { sb.append(fieldDelimiter); - long weight = _TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE); + long weight = TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE); sb.append(weight); entryValues.add(String.valueOf(weight)); } if (hasPayload) { sb.append(fieldDelimiter); - String payload = _TestUtil.randomSimpleString(random(), 1, 300); + String payload = TestUtil.randomSimpleString(random(), 1, 300); sb.append(payload); entryValues.add(payload); } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java index 48742342fe6..16ee899ac09 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java @@ -33,7 +33,6 @@ import java.util.concurrent.Callable; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.search.suggest.Lookup; // javadocs import org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester; import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester; import org.apache.lucene.search.suggest.analyzing.FuzzySuggester; @@ -162,7 +161,7 @@ public class 
LookupBenchmarkTest extends LuceneTestCase { } catch (InstantiationException e) { Analyzer a = new MockAnalyzer(random, MockTokenizer.KEYWORD, false); if (cls == AnalyzingInfixSuggester.class) { - lookup = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, _TestUtil.getTempDir("LookupBenchmarkTest"), a); + lookup = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, TestUtil.getTempDir("LookupBenchmarkTest"), a); } else { Constructor ctor = cls.getConstructor(Analyzer.class); lookup = ctor.newInstance(a); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java index f98f90fbe3a..3a29effb556 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java @@ -22,13 +22,12 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.util.List; -import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.search.suggest.Lookup.LookupResult; import org.apache.lucene.search.suggest.fst.FSTCompletionLookup; import org.apache.lucene.search.suggest.jaspell.JaspellLookup; import org.apache.lucene.search.suggest.tst.TSTLookup; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class PersistenceTest extends LuceneTestCase { public final String[] keys = new String[] { @@ -82,7 +81,7 @@ public class PersistenceTest extends LuceneTestCase { Random random = random(); long previous = Long.MIN_VALUE; for (Input k : keys) { - List list = lookup.lookup(_TestUtil.bytesToCharSequence(k.term, random), false, 1); + List list = lookup.lookup(TestUtil.bytesToCharSequence(k.term, random), false, 1); assertEquals(1, list.size()); LookupResult lookupResult = list.get(0); assertNotNull(k.term.utf8ToString(), lookupResult.key); diff --git 
a/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefArray.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefArray.java index 49bc12d9581..935b71bc529 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefArray.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestBytesRefArray.java @@ -24,7 +24,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.Counter; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestBytesRefArray extends LuceneTestCase { @@ -41,7 +41,7 @@ public class TestBytesRefArray extends LuceneTestCase { BytesRef spare = new BytesRef(); int initSize = list.size(); for (int i = 0; i < entries; i++) { - String randomRealisticUnicodeString = _TestUtil + String randomRealisticUnicodeString = TestUtil .randomRealisticUnicodeString(random); spare.copyChars(randomRealisticUnicodeString); assertEquals(i+initSize, list.append(spare)); @@ -84,7 +84,7 @@ public class TestBytesRefArray extends LuceneTestCase { BytesRef spare = new BytesRef(); final int initSize = list.size(); for (int i = 0; i < entries; i++) { - String randomRealisticUnicodeString = _TestUtil + String randomRealisticUnicodeString = TestUtil .randomRealisticUnicodeString(random); spare.copyChars(randomRealisticUnicodeString); assertEquals(initSize + i, list.append(spare)); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java index b0c423d753c..ae099780fe9 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestInputIterator.java @@ -24,12 +24,9 @@ import java.util.Map; import java.util.Random; import java.util.TreeMap; 
-import org.apache.lucene.store.ByteArrayDataOutput; -import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefHash; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestInputIterator extends LuceneTestCase { @@ -55,8 +52,8 @@ public class TestInputIterator extends LuceneTestCase { BytesRef key; BytesRef payload; do { - key = new BytesRef(_TestUtil.randomUnicodeString(random)); - payload = new BytesRef(_TestUtil.randomUnicodeString(random)); + key = new BytesRef(TestUtil.randomUnicodeString(random)); + payload = new BytesRef(TestUtil.randomUnicodeString(random)); } while (sorted.containsKey(key)); long value = random.nextLong(); sortedWithoutPayload.put(key, value); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java index 93842671bbe..4981b33800c 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java @@ -19,7 +19,6 @@ package org.apache.lucene.search.suggest.analyzing; import java.io.File; import java.io.IOException; -import java.io.Reader; import java.io.StringReader; import java.util.ArrayList; import java.util.List; @@ -40,7 +39,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; // Test requires postings offsets: @SuppressCodecs({"Lucene3x","MockFixedIntBlock","MockVariableIntBlock","MockSep","MockRandom"}) @@ -52,7 +51,7 @@ public class AnalyzingInfixSuggesterTest 
extends LuceneTestCase { new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a, 3) { @@ -63,7 +62,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { }; suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(2, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals(10, results.get(0).value); @@ -73,19 +72,19 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(8, results.get(1).value); assertEquals(new BytesRef("foobar"), results.get(1).payload); - results = suggester.lookup(_TestUtil.stringToCharSequence("ear ", random()), 10, true, true); + results = suggester.lookup(TestUtil.stringToCharSequence("ear ", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("lend me your ear", results.get(0).key); assertEquals(8, results.get(0).value); assertEquals(new BytesRef("foobar"), results.get(0).payload); - results = suggester.lookup(_TestUtil.stringToCharSequence("pen", random()), 10, true, true); + results = suggester.lookup(TestUtil.stringToCharSequence("pen", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals(10, results.get(0).value); assertEquals(new BytesRef("foobaz"), results.get(0).payload); - results = suggester.lookup(_TestUtil.stringToCharSequence("p", random()), 10, true, true); + results = 
suggester.lookup(TestUtil.stringToCharSequence("p", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals(10, results.get(0).value); @@ -100,7 +99,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a, 3) { @@ -119,7 +118,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { return newFSDirectory(path); } }; - List results = suggester.lookup(_TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(2, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); assertEquals(10, results.get(0).value); @@ -156,7 +155,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a, 3) { @@ -208,7 +207,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { }; suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("ear", random()), 10, true, true); + List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true); assertEquals(1, 
results.size()); assertEquals("a penny saved is a penny earned", toString((List) results.get(0).highlightKey)); assertEquals(10, results.get(0).value); @@ -237,7 +236,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); int minPrefixLength = random().nextInt(10); @@ -253,7 +252,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { for(int j=0;j<2;j++) { boolean doHighlight = j == 0; - List results = suggester.lookup(_TestUtil.stringToCharSequence("ear", random()), 10, true, doHighlight); + List results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, doHighlight); assertEquals(2, results.size()); if (doHighlight) { assertEquals("a penny saved is a penny earned", results.get(0).key); @@ -270,7 +269,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(8, results.get(1).value); assertEquals(new BytesRef("foobar"), results.get(1).payload); - results = suggester.lookup(_TestUtil.stringToCharSequence("ear ", random()), 10, true, doHighlight); + results = suggester.lookup(TestUtil.stringToCharSequence("ear ", random()), 10, true, doHighlight); assertEquals(1, results.size()); if (doHighlight) { assertEquals("lend me your ear", results.get(0).key); @@ -280,7 +279,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(8, results.get(0).value); assertEquals(new BytesRef("foobar"), results.get(0).payload); - results = suggester.lookup(_TestUtil.stringToCharSequence("pen", random()), 10, true, doHighlight); + results = suggester.lookup(TestUtil.stringToCharSequence("pen", random()), 10, true, doHighlight); assertEquals(1, results.size()); if (doHighlight) { 
assertEquals("a penny saved is a penny earned", results.get(0).key); @@ -290,7 +289,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { assertEquals(10, results.get(0).value); assertEquals(new BytesRef("foobaz"), results.get(0).payload); - results = suggester.lookup(_TestUtil.stringToCharSequence("p", random()), 10, true, doHighlight); + results = suggester.lookup(TestUtil.stringToCharSequence("p", random()), 10, true, doHighlight); assertEquals(1, results.size()); if (doHighlight) { assertEquals("a penny saved is a penny earned", results.get(0).key); @@ -318,7 +317,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a, 3) { @@ -328,7 +327,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } }; suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("penn", random()), 10, true, true); + List results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a penny saved is a penny earned", results.get(0).key); suggester.close(); @@ -339,7 +338,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { new Input("a Penny saved is a penny earned", 10, new BytesRef("foobaz")), }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a, 3) { 
@@ -349,7 +348,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } }; suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("penn", random()), 10, true, true); + List results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a Penny saved is a penny earned", results.get(0).key); suggester.close(); @@ -370,7 +369,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } }; suggester.build(new InputArrayIterator(keys)); - results = suggester.lookup(_TestUtil.stringToCharSequence("penn", random()), 10, true, true); + results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a Penny saved is a penny earned", results.get(0).key); suggester.close(); @@ -381,7 +380,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz")), }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a, 3) { @@ -415,7 +414,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { } }; - File tempDir = _TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("AnalyzingInfixSuggesterTest"); AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, tempDir, indexAnalyzer, queryAnalyzer, 3) { @Override @@ -429,7 +428,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { }; suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("a", random()), 10, true, true); + 
List results = suggester.lookup(TestUtil.stringToCharSequence("a", random()), 10, true, true); assertEquals(1, results.size()); assertEquals("a bob for apples", results.get(0).key); suggester.close(); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java index 13a344aa2dd..ce8fbea6d0e 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java @@ -25,14 +25,12 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Set; import java.util.TreeSet; @@ -57,7 +55,7 @@ import org.apache.lucene.search.suggest.InputArrayIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class AnalyzingSuggesterTest extends LuceneTestCase { @@ -77,20 +75,20 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { suggester.build(new InputArrayIterator(keys)); // top N of 2, but only foo is available - List results = suggester.lookup(_TestUtil.stringToCharSequence("f", random()), false, 2); + List results = suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); // top N of 1 for 'bar': we return this even though // barbar is higher because exactFirst is enabled: - results = 
suggester.lookup(_TestUtil.stringToCharSequence("bar", random()), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("bar", random()), false, 1); assertEquals(1, results.size()); assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); // top N Of 2 for 'b' - results = suggester.lookup(_TestUtil.stringToCharSequence("b", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("b", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -98,7 +96,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(10, results.get(1).value, 0.01F); // top N of 3 for 'ba' - results = suggester.lookup(_TestUtil.stringToCharSequence("ba", random()), false, 3); + results = suggester.lookup(TestUtil.stringToCharSequence("ba", random()), false, 3); assertEquals(3, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -121,7 +119,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { suggester.build(new InputArrayIterator(keys)); for (int i = 0; i < 2; i++) { // top N of 2, but only foo is available - List results = suggester.lookup(_TestUtil.stringToCharSequence("f", random()), false, 2); + List results = suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); @@ -129,14 +127,14 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { // top N of 1 for 'bar': we return this even though // barbar is higher because exactFirst is enabled: - results = suggester.lookup(_TestUtil.stringToCharSequence("bar", random()), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("bar", random()), false, 1); assertEquals(1, results.size()); 
assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); assertEquals(new BytesRef("goodbye"), results.get(0).payload); // top N Of 2 for 'b' - results = suggester.lookup(_TestUtil.stringToCharSequence("b", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("b", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -146,7 +144,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(new BytesRef("goodbye"), results.get(1).payload); // top N of 3 for 'ba' - results = suggester.lookup(_TestUtil.stringToCharSequence("ba", random()), false, 3); + results = suggester.lookup(TestUtil.stringToCharSequence("ba", random()), false, 3); assertEquals(3, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -218,19 +216,19 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); + List results = suggester.lookup(TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); // omit the 'the' since its a stopword, its suggested anyway - results = suggester.lookup(_TestUtil.stringToCharSequence("ghost of chris", random()), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("ghost of chris", random()), false, 1); assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); // omit the 'the' and 'of' since they are stopwords, its suggested anyway - results = 
suggester.lookup(_TestUtil.stringToCharSequence("ghost chris", random()), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("ghost chris", random()), false, 1); assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); @@ -260,7 +258,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { // pass, and more generally if the analyzer can know // that the user's current query has ended at a word, // but, analyzers don't produce SEP tokens! - List r = suggester.lookup(_TestUtil.stringToCharSequence("ab c", random()), false, 2); + List r = suggester.lookup(TestUtil.stringToCharSequence("ab c", random()), false, 2); assertEquals(2, r.size()); // With no PRESERVE_SEPS specified, "ab c" should also @@ -674,7 +672,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { } for (int i = 0; i < numQueries; i++) { - int numTokens = _TestUtil.nextInt(random(), 1, 4); + int numTokens = TestUtil.nextInt(random(), 1, 4); String key; String analyzedKey; while(true) { @@ -686,7 +684,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { while (true) { // TODO: would be nice to fix this slowCompletor/comparator to // use full range, but we might lose some coverage too... - s = _TestUtil.randomSimpleString(random()); + s = TestUtil.randomSimpleString(random()); if (s.length() > 0) { if (token > 0) { key += " "; @@ -766,8 +764,8 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { System.out.println("\nTEST: prefix=" + prefix); } - final int topN = _TestUtil.nextInt(random(), 1, 10); - List r = suggester.lookup(_TestUtil.stringToCharSequence(prefix, random()), false, topN); + final int topN = TestUtil.nextInt(random(), 1, 10); + List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); // 2. 
go thru whole set to find suggestions: List matches = new ArrayList(); @@ -923,7 +921,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(3, results.get(2).value); // Try again after save/load: - File tmpDir = _TestUtil.getTempDir("AnalyzingSuggesterTest"); + File tmpDir = TestUtil.getTempDir("AnalyzingSuggesterTest"); tmpDir.mkdir(); File path = new File(tmpDir, "suggester"); @@ -985,7 +983,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(5, results.get(1).value); // Try again after save/load: - File tmpDir = _TestUtil.getTempDir("AnalyzingSuggesterTest"); + File tmpDir = TestUtil.getTempDir("AnalyzingSuggesterTest"); tmpDir.mkdir(); File path = new File(tmpDir, "suggester"); @@ -1055,7 +1053,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { assertEquals(5, results.get(1).value); // Try again after save/load: - File tmpDir = _TestUtil.getTempDir("AnalyzingSuggesterTest"); + File tmpDir = TestUtil.getTempDir("AnalyzingSuggesterTest"); tmpDir.mkdir(); File path = new File(tmpDir, "suggester"); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java index 4f6af6267bf..b88e3e32a2d 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import java.io.File; import java.io.IOException; @@ -46,7 +46,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { new Input("star wars: episode v - the empire strikes 
back", 8, payload) }; - File tempDir = _TestUtil.getTempDir("BlendedInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET); BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a, @@ -90,7 +90,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { new Input("top of the lake", w, pl) }; - File tempDir = _TestUtil.getTempDir("BlendedInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET); // BlenderType.LINEAR is used by default (remove position*10%) @@ -141,7 +141,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { new Input("the returned", 10, ret), }; - File tempDir = _TestUtil.getTempDir("BlendedInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET); // if factor is small, we don't get the expected element @@ -201,7 +201,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase { new Input("the returned", 10, ret), }; - File tempDir = _TestUtil.getTempDir("BlendedInfixSuggesterTest"); + File tempDir = TestUtil.getTempDir("BlendedInfixSuggesterTest"); Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET); // if factor is small, we don't get the expected element diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java index ab08ff40a59..daac881e424 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java @@ -46,7 +46,7 @@ import 
org.apache.lucene.search.suggest.InputArrayIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.State; import org.apache.lucene.util.fst.Util; @@ -57,7 +57,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { List keys = new ArrayList(); int numTerms = atLeast(100); for (int i = 0; i < numTerms; i++) { - keys.add(new Input("boo" + _TestUtil.randomSimpleString(random()), 1 + random().nextInt(100))); + keys.add(new Input("boo" + TestUtil.randomSimpleString(random()), 1 + random().nextInt(100))); } keys.add(new Input("foo bar boo far", 12)); MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); @@ -67,7 +67,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { String addRandomEdit = addRandomEdit("foo bar boo", FuzzySuggester.DEFAULT_NON_FUZZY_PREFIX); - List results = suggester.lookup(_TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); + List results = suggester.lookup(TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); assertEquals(addRandomEdit, 1, results.size()); assertEquals("foo bar boo far", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -78,7 +78,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { List keys = new ArrayList(); int numTerms = atLeast(100); for (int i = 0; i < numTerms; i++) { - keys.add(new Input("буу" + _TestUtil.randomSimpleString(random()), 1 + random().nextInt(100))); + keys.add(new Input("буу" + TestUtil.randomSimpleString(random()), 1 + random().nextInt(100))); } keys.add(new Input("фуу бар буу фар", 12)); MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false); @@ -88,7 +88,7 @@ 
public class FuzzySuggesterTest extends LuceneTestCase { int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { String addRandomEdit = addRandomEdit("фуу бар буу", 0); - List results = suggester.lookup(_TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); + List results = suggester.lookup(TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2); assertEquals(addRandomEdit, 1, results.size()); assertEquals("фуу бар буу фар", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -107,29 +107,29 @@ public class FuzzySuggesterTest extends LuceneTestCase { FuzzySuggester suggester = new FuzzySuggester(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)); suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("bariar", random()), false, 2); + List results = suggester.lookup(TestUtil.stringToCharSequence("bariar", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); - results = suggester.lookup(_TestUtil.stringToCharSequence("barbr", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("barbr", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); - results = suggester.lookup(_TestUtil.stringToCharSequence("barbara", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("barbara", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbara", results.get(0).key.toString()); assertEquals(6, results.get(0).value, 0.01F); - results = suggester.lookup(_TestUtil.stringToCharSequence("barbar", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("barbar", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", 
results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); assertEquals("barbara", results.get(1).key.toString()); assertEquals(6, results.get(1).value, 0.01F); - results = suggester.lookup(_TestUtil.stringToCharSequence("barbaa", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("barbaa", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -137,20 +137,20 @@ public class FuzzySuggesterTest extends LuceneTestCase { assertEquals(6, results.get(1).value, 0.01F); // top N of 2, but only foo is available - results = suggester.lookup(_TestUtil.stringToCharSequence("f", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("f", random()), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); // top N of 1 for 'bar': we return this even though // barbar is higher because exactFirst is enabled: - results = suggester.lookup(_TestUtil.stringToCharSequence("bar", random()), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("bar", random()), false, 1); assertEquals(1, results.size()); assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); // top N Of 2 for 'b' - results = suggester.lookup(_TestUtil.stringToCharSequence("b", random()), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("b", random()), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -158,7 +158,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { assertEquals(10, results.get(1).value, 0.01F); // top N of 3 for 'ba' - results = suggester.lookup(_TestUtil.stringToCharSequence("ba", random()), false, 3); + results = 
suggester.lookup(TestUtil.stringToCharSequence("ba", random()), false, 3); assertEquals(3, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -181,19 +181,19 @@ public class FuzzySuggesterTest extends LuceneTestCase { FuzzySuggester.DEFAULT_NON_FUZZY_PREFIX, FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, FuzzySuggester.DEFAULT_UNICODE_AWARE); suggester.build(new InputArrayIterator(keys)); - List results = suggester.lookup(_TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); + List results = suggester.lookup(TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1); assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); // omit the 'the' since its a stopword, its suggested anyway - results = suggester.lookup(_TestUtil.stringToCharSequence("ghost of chris", random()), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("ghost of chris", random()), false, 1); assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); // omit the 'the' and 'of' since they are stopwords, its suggested anyway - results = suggester.lookup(_TestUtil.stringToCharSequence("ghost chris", random()), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("ghost chris", random()), false, 1); assertEquals(1, results.size()); assertEquals("the ghost of christmas past", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); @@ -214,7 +214,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { // pass, and more generally if the analyzer can know // that the user's current query has ended at a word, // but, analyzers don't produce SEP tokens! 
- List r = suggester.lookup(_TestUtil.stringToCharSequence("ab c", random()), false, 2); + List r = suggester.lookup(TestUtil.stringToCharSequence("ab c", random()), false, 2); assertEquals(2, r.size()); // With no PRESERVE_SEPS specified, "ab c" should also @@ -613,7 +613,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { } for (int i = 0; i < numQueries; i++) { - int numTokens = _TestUtil.nextInt(random(), 1, 4); + int numTokens = TestUtil.nextInt(random(), 1, 4); String key; String analyzedKey; while(true) { @@ -625,7 +625,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { while (true) { // TODO: would be nice to fix this slowCompletor/comparator to // use full range, but we might lose some coverage too... - s = _TestUtil.randomSimpleString(random()); + s = TestUtil.randomSimpleString(random()); if (s.length() > 0) { if (token > 0) { key += " "; @@ -692,8 +692,8 @@ public class FuzzySuggesterTest extends LuceneTestCase { System.out.println("\nTEST: prefix=" + prefix); } - final int topN = _TestUtil.nextInt(random(), 1, 10); - List r = suggester.lookup(_TestUtil.stringToCharSequence(prefix, random()), false, topN); + final int topN = TestUtil.nextInt(random(), 1, 10); + List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); // 2. 
go thru whole set to find suggestions: List matches = new ArrayList(); @@ -919,7 +919,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { } private String randomSimpleString(int maxLen) { - final int len = _TestUtil.nextInt(random(), 1, maxLen); + final int len = TestUtil.nextInt(random(), 1, maxLen); final char[] chars = new char[len]; for(int j=0;j seen = new HashSet(); while (seen.size() < terms.length) { - String token = _TestUtil.randomSimpleString(random(), 1, 5); + String token = TestUtil.randomSimpleString(random(), 1, 5); if (!seen.contains(token)) { terms[seen.size()] = token; seen.add(token); @@ -325,7 +324,7 @@ public class TestFreeTextSuggester extends LuceneTestCase { totTokens += docs[i].length; } - int grams = _TestUtil.nextInt(random(), 1, 4); + int grams = TestUtil.nextInt(random(), 1, 4); if (VERBOSE) { System.out.println("TEST: " + terms.length + " terms; " + numDocs + " docs; " + grams + " grams"); @@ -400,7 +399,7 @@ public class TestFreeTextSuggester extends LuceneTestCase { int lookups = atLeast(100); for(int iter=0;iter keys = new ArrayList(); for (int i = 0; i < 5000; i++) { - keys.add(new Input(_TestUtil.randomSimpleString(r), -1)); + keys.add(new Input(TestUtil.randomSimpleString(r), -1)); } lookup.build(new InputArrayIterator(keys)); @@ -168,7 +168,7 @@ public class FSTCompletionTest extends LuceneTestCase { // are. 
Long previous = null; for (Input tf : keys) { - Long current = ((Number)lookup.get(_TestUtil.bytesToCharSequence(tf.term, random()))).longValue(); + Long current = ((Number)lookup.get(TestUtil.bytesToCharSequence(tf.term, random()))).longValue(); if (previous != null) { assertEquals(previous, current); } @@ -183,8 +183,8 @@ public class FSTCompletionTest extends LuceneTestCase { lookup.build(new InputArrayIterator(input)); assertEquals(input.size(), lookup.getCount()); for (Input tf : input) { - assertNotNull("Not found: " + tf.term.toString(), lookup.get(_TestUtil.bytesToCharSequence(tf.term, random()))); - assertEquals(tf.term.utf8ToString(), lookup.lookup(_TestUtil.bytesToCharSequence(tf.term, random()), true, 1).get(0).key.toString()); + assertNotNull("Not found: " + tf.term.toString(), lookup.get(TestUtil.bytesToCharSequence(tf.term, random()))); + assertEquals(tf.term.utf8ToString(), lookup.lookup(TestUtil.bytesToCharSequence(tf.term, random()), true, 1).get(0).key.toString()); } List result = lookup.lookup(stringToCharSequence("wit"), true, 5); @@ -221,7 +221,7 @@ public class FSTCompletionTest extends LuceneTestCase { } private CharSequence stringToCharSequence(String prefix) { - return _TestUtil.stringToCharSequence(prefix, random()); + return TestUtil.stringToCharSequence(prefix, random()); } private void assertMatchEquals(List res, String... 
expected) { diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java index 6b4c298332e..540fadedf11 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java @@ -37,15 +37,15 @@ public class TestSort extends LuceneTestCase { @Before public void prepareTempDir() throws IOException { - tempDir = _TestUtil.getTempDir("mergesort"); - _TestUtil.rmDir(tempDir); + tempDir = TestUtil.getTempDir("mergesort"); + TestUtil.rmDir(tempDir); tempDir.mkdirs(); } @After public void cleanup() throws IOException { if (tempDir != null) - _TestUtil.rmDir(tempDir); + TestUtil.rmDir(tempDir); } @Test diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java index d1c47149642..9439a3a5432 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.suggest.Input; import org.apache.lucene.search.suggest.InputArrayIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class WFSTCompletionTest extends LuceneTestCase { @@ -41,25 +41,25 @@ public class WFSTCompletionTest extends LuceneTestCase { suggester.build(new InputArrayIterator(keys)); // top N of 2, but only foo is available - List results = suggester.lookup(_TestUtil.stringToCharSequence("f", random), false, 2); + List results = suggester.lookup(TestUtil.stringToCharSequence("f", random), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); 
assertEquals(50, results.get(0).value, 0.01F); // make sure we don't get a dup exact suggestion: - results = suggester.lookup(_TestUtil.stringToCharSequence("foo", random), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("foo", random), false, 2); assertEquals(1, results.size()); assertEquals("foo", results.get(0).key.toString()); assertEquals(50, results.get(0).value, 0.01F); // top N of 1 for 'bar': we return this even though barbar is higher - results = suggester.lookup(_TestUtil.stringToCharSequence("bar", random), false, 1); + results = suggester.lookup(TestUtil.stringToCharSequence("bar", random), false, 1); assertEquals(1, results.size()); assertEquals("bar", results.get(0).key.toString()); assertEquals(10, results.get(0).value, 0.01F); // top N Of 2 for 'b' - results = suggester.lookup(_TestUtil.stringToCharSequence("b", random), false, 2); + results = suggester.lookup(TestUtil.stringToCharSequence("b", random), false, 2); assertEquals(2, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -67,7 +67,7 @@ public class WFSTCompletionTest extends LuceneTestCase { assertEquals(10, results.get(1).value, 0.01F); // top N of 3 for 'ba' - results = suggester.lookup(_TestUtil.stringToCharSequence("ba", random), false, 3); + results = suggester.lookup(TestUtil.stringToCharSequence("ba", random), false, 3); assertEquals(3, results.size()); assertEquals("barbar", results.get(0).key.toString()); assertEquals(12, results.get(0).value, 0.01F); @@ -138,7 +138,7 @@ public class WFSTCompletionTest extends LuceneTestCase { while (true) { // TODO: would be nice to fix this slowCompletor/comparator to // use full range, but we might lose some coverage too... 
- s = _TestUtil.randomSimpleString(random()); + s = TestUtil.randomSimpleString(random()); if (!slowCompletor.containsKey(s)) { break; } @@ -159,8 +159,8 @@ public class WFSTCompletionTest extends LuceneTestCase { assertEquals(numWords, suggester.getCount()); Random random = new Random(random().nextLong()); for (String prefix : allPrefixes) { - final int topN = _TestUtil.nextInt(random, 1, 10); - List r = suggester.lookup(_TestUtil.stringToCharSequence(prefix, random), false, topN); + final int topN = TestUtil.nextInt(random, 1, 10); + List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random), false, topN); // 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion final List matches = new ArrayList(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java index aae538f24b5..7a8ca28255f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java @@ -41,7 +41,7 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.Rethrow; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Base class for all Lucene unit tests that use TokenStreams. 
@@ -493,12 +493,12 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { boolean useCharFilter = random.nextBoolean(); Directory dir = null; RandomIndexWriter iw = null; - final String postingsFormat = _TestUtil.getPostingsFormat("dummy"); + final String postingsFormat = TestUtil.getPostingsFormat("dummy"); boolean codecOk = iterations * maxWordLength < 100000 || !(postingsFormat.equals("Memory") || postingsFormat.equals("SimpleText")); if (rarely(random) && codecOk) { - dir = newFSDirectory(_TestUtil.getTempDir("bttc")); + dir = newFSDirectory(TestUtil.getTempDir("bttc")); iw = new RandomIndexWriter(new Random(seed), dir, a); } boolean success = false; @@ -506,7 +506,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { checkRandomData(new Random(seed), a, iterations, maxWordLength, useCharFilter, simple, offsetsAreCorrect, iw); // now test with multiple threads: note we do the EXACT same thing we did before in each thread, // so this should only really fail from another thread if its an actual thread problem - int numThreads = _TestUtil.nextInt(random, 2, 4); + int numThreads = TestUtil.nextInt(random, 2, 4); AnalysisThread threads[] = new AnalysisThread[numThreads]; for (int i = 0; i < threads.length; i++) { threads[i] = new AnalysisThread(seed, a, iterations, maxWordLength, useCharFilter, simple, offsetsAreCorrect, iw); @@ -556,7 +556,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { if (random.nextBoolean()) { ft.setOmitNorms(true); } - String pf = _TestUtil.getPostingsFormat("dummy"); + String pf = TestUtil.getPostingsFormat("dummy"); boolean supportsOffsets = !doesntSupportOffsets.contains(pf); switch(random.nextInt(4)) { case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break; @@ -598,7 +598,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { } } else { // synthetic - text = _TestUtil.randomAnalysisString(random, maxWordLength, simple); + text = 
TestUtil.randomAnalysisString(random, maxWordLength, simple); } try { diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java index db4223e99b6..3a1c5b35fa2 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java @@ -46,7 +46,7 @@ import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Base test class for testing Unicode collation. @@ -249,7 +249,7 @@ public abstract class CollationTestBase extends LuceneTestCase { public void assertThreadSafe(final Analyzer analyzer) throws Exception { int numTestPoints = 100; - int numThreads = _TestUtil.nextInt(random(), 3, 5); + int numThreads = TestUtil.nextInt(random(), 3, 5); final HashMap map = new HashMap(); // create a map up front. @@ -257,7 +257,7 @@ // and ensure they are the same as the ones we produced in serial fashion. 
for (int i = 0; i < numTestPoints; i++) { - String term = _TestUtil.randomSimpleString(random()); + String term = TestUtil.randomSimpleString(random()); try (TokenStream ts = analyzer.tokenStream("fake", term)) { TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); BytesRef bytes = termAtt.getBytesRef(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java index 35f14b42936..ecf4f3fe66e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockGraphTokenFilter.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Random; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; // TODO: sometimes remove tokens too...? @@ -55,7 +55,7 @@ public final class MockGraphTokenFilter extends LookaheadTokenFilter 0 && maxPos <= nextPos && random.nextInt(5) == 3) { - final int holeSize = _TestUtil.nextInt(random, 1, 5); + final int holeSize = TestUtil.nextInt(random, 1, 5); posIncAtt.setPositionIncrement(posInc + holeSize); nextPos += holeSize; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockReaderWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockReaderWrapper.java index d089f4da1c5..742059edcc0 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockReaderWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockReaderWrapper.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.io.Reader; import java.util.Random; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** Wraps a Reader, and can throw random or fixed * exceptions, and spoon feed read chars. 
*/ @@ -68,7 +68,7 @@ public class MockReaderWrapper extends Reader { } else { // Spoon-feed: intentionally maybe return less than // the consumer asked for - realLen = _TestUtil.nextInt(random, 1, len); + realLen = TestUtil.nextInt(random, 1, len); } if (excAtChar != -1) { final int left = excAtChar - readSoFar; diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java index 254cc962994..eacde8e79db 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java @@ -64,7 +64,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Randomly combines terms index impl w/ postings impls. 
@@ -101,9 +101,9 @@ public final class MockRandomPostingsFormat extends PostingsFormat { public MockIntStreamFactory(Random random) { salt = random.nextInt(); delegates.add(new MockSingleIntFactory()); - final int blockSize = _TestUtil.nextInt(random, 1, 2000); + final int blockSize = TestUtil.nextInt(random, 1, 2000); delegates.add(new MockFixedIntBlockPostingsFormat.MockIntFactory(blockSize)); - final int baseBlockSize = _TestUtil.nextInt(random, 1, 127); + final int baseBlockSize = TestUtil.nextInt(random, 1, 127); delegates.add(new MockVariableIntBlockPostingsFormat.MockIntFactory(baseBlockSize)); // TODO: others } @@ -147,7 +147,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { // we pull this before the seed intentionally: because its not consumed at runtime // (the skipInterval is written into postings header) - int skipInterval = _TestUtil.nextInt(seedRandom, minSkipInterval, 10); + int skipInterval = TestUtil.nextInt(seedRandom, minSkipInterval, 10); if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: skipInterval=" + skipInterval); @@ -183,7 +183,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { } if (random.nextBoolean()) { - final int totTFCutoff = _TestUtil.nextInt(random, 1, 20); + final int totTFCutoff = TestUtil.nextInt(random, 1, 20); if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: writing pulsing postings with totTFCutoff=" + totTFCutoff); } @@ -222,7 +222,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { // TODO: would be nice to allow 1 but this is very // slow to write - final int minTermsInBlock = _TestUtil.nextInt(random, 2, 100); + final int minTermsInBlock = TestUtil.nextInt(random, 2, 100); final int maxTermsInBlock = Math.max(2, (minTermsInBlock-1)*2 + random.nextInt(100)); boolean success = false; @@ -245,7 +245,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { final TermsIndexWriterBase indexWriter; try { if 
(random.nextBoolean()) { - int termIndexInterval = _TestUtil.nextInt(random, 1, 100); + int termIndexInterval = TestUtil.nextInt(random, 1, 100); if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: fixed-gap terms index (tii=" + termIndexInterval + ")"); } @@ -254,18 +254,18 @@ public final class MockRandomPostingsFormat extends PostingsFormat { final VariableGapTermsIndexWriter.IndexTermSelector selector; final int n2 = random.nextInt(3); if (n2 == 0) { - final int tii = _TestUtil.nextInt(random, 1, 100); + final int tii = TestUtil.nextInt(random, 1, 100); selector = new VariableGapTermsIndexWriter.EveryNTermSelector(tii); if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: variable-gap terms index (tii=" + tii + ")"); } } else if (n2 == 1) { - final int docFreqThresh = _TestUtil.nextInt(random, 2, 100); - final int tii = _TestUtil.nextInt(random, 1, 100); + final int docFreqThresh = TestUtil.nextInt(random, 2, 100); + final int tii = TestUtil.nextInt(random, 1, 100); selector = new VariableGapTermsIndexWriter.EveryNOrDocFreqTermSelector(docFreqThresh, tii); } else { final long seed2 = random.nextLong(); - final int gap = _TestUtil.nextInt(random, 2, 40); + final int gap = TestUtil.nextInt(random, 2, 40); if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: random-gap terms index (max gap=" + gap + ")"); } @@ -322,7 +322,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { final Random random = new Random(seed); - int readBufferSize = _TestUtil.nextInt(random, 1, 4096); + int readBufferSize = TestUtil.nextInt(random, 1, 4096); if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: readBufferSize=" + readBufferSize); } @@ -343,7 +343,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { } if (random.nextBoolean()) { - final int totTFCutoff = _TestUtil.nextInt(random, 1, 20); + final int totTFCutoff = TestUtil.nextInt(random, 1, 20); if (LuceneTestCase.VERBOSE) { 
System.out.println("MockRandomCodec: reading pulsing postings with totTFCutoff=" + totTFCutoff); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java b/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java index 25b922e9c9f..fedf22490f6 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/AlcoholicMergePolicy.java @@ -24,7 +24,7 @@ import java.util.Locale; import java.util.Random; import java.util.TimeZone; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** *

@@ -47,9 +47,9 @@ public class AlcoholicMergePolicy extends LogMergePolicy { public AlcoholicMergePolicy(TimeZone tz, Random random) { this.calendar = new GregorianCalendar(tz, Locale.ROOT); - calendar.setTimeInMillis(_TestUtil.nextLong(random, 0, Long.MAX_VALUE)); + calendar.setTimeInMillis(TestUtil.nextLong(random, 0, Long.MAX_VALUE)); this.random = random; - maxMergeSize = _TestUtil.nextInt(random, 1024*1024, Integer.MAX_VALUE); + maxMergeSize = TestUtil.nextInt(random, 1024 * 1024, Integer.MAX_VALUE); } @Override diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java index 381606f1cc7..119dcb94b65 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java @@ -26,7 +26,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.packed.PackedInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; @@ -47,7 +47,7 @@ public abstract class BaseCompressingDocValuesFormatTestCase extends BaseDocValu final IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); final IndexWriter iwriter = new IndexWriter(dir, iwc); - final int uniqueValueCount = _TestUtil.nextInt(random(), 1, 256); + final int uniqueValueCount = TestUtil.nextInt(random(), 1, 256); final List values = new ArrayList(); final Document doc = new Document(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java 
b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java index 9723df05205..aab13f07a85 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java @@ -56,7 +56,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefHash; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; @@ -1139,11 +1139,11 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { int numDocs = atLeast(100); BytesRefHash hash = new BytesRefHash(); Map docToString = new HashMap(); - int maxLength = _TestUtil.nextInt(random(), 1, 50); + int maxLength = TestUtil.nextInt(random(), 1, 50); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); doc.add(newTextField("id", "" + i, Field.Store.YES)); - String string = _TestUtil.randomRealisticUnicodeString(random(), 1, maxLength); + String string = TestUtil.randomRealisticUnicodeString(random(), 1, maxLength); BytesRef br = new BytesRef(string); doc.add(new SortedDocValuesField("field", br)); hash.add(br); @@ -1175,7 +1175,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { Document doc = new Document(); String id = "" + i + numDocs; doc.add(newTextField("id", id, Field.Store.YES)); - String string = _TestUtil.randomRealisticUnicodeString(random(), 1, maxLength); + String string = TestUtil.randomRealisticUnicodeString(random(), 1, maxLength); BytesRef br = new BytesRef(string); hash.add(br); docToString.put(id, string); @@ -1221,7 +1221,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { doTestNumericsVsStoredFields(new LongProducer() { @Override long next() { - return 
_TestUtil.nextLong(random(), minValue, maxValue); + return TestUtil.nextLong(random(), minValue, maxValue); } }); } @@ -1285,7 +1285,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { doTestMissingVsFieldCache(new LongProducer() { @Override long next() { - return _TestUtil.nextLong(random(), minValue, maxValue); + return TestUtil.nextLong(random(), minValue, maxValue); } }); } @@ -1431,7 +1431,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { if (minLength == maxLength) { length = minLength; // fixed length } else { - length = _TestUtil.nextInt(random(), minLength, maxLength); + length = TestUtil.nextInt(random(), minLength, maxLength); } byte buffer[] = new byte[length]; random().nextBytes(buffer); @@ -1470,7 +1470,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { public void testBinaryFixedLengthVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - int fixedLength = _TestUtil.nextInt(random(), 0, 10); + int fixedLength = TestUtil.nextInt(random(), 0, 10); doTestBinaryVsStoredFields(fixedLength, fixedLength); } } @@ -1502,7 +1502,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { if (minLength == maxLength) { length = minLength; // fixed length } else { - length = _TestUtil.nextInt(random(), minLength, maxLength); + length = TestUtil.nextInt(random(), minLength, maxLength); } byte buffer[] = new byte[length]; random().nextBytes(buffer); @@ -1558,9 +1558,9 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { if (minLength == maxLength) { length = minLength; // fixed length } else { - length = _TestUtil.nextInt(random(), minLength, maxLength); + length = TestUtil.nextInt(random(), minLength, maxLength); } - String value = _TestUtil.randomSimpleString(random(), length); + String value = TestUtil.randomSimpleString(random(), length); indexedField.setStringValue(value); 
dvField.setBytesValue(new BytesRef(value)); writer.addDocument(doc); @@ -1592,7 +1592,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { public void testSortedFixedLengthVsStoredFields() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - int fixedLength = _TestUtil.nextInt(random(), 1, 10); + int fixedLength = TestUtil.nextInt(random(), 1, 10); doTestSortedVsStoredFields(fixedLength, fixedLength); } } @@ -1600,7 +1600,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { public void testSortedFixedLengthVsFieldCache() throws Exception { int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - int fixedLength = _TestUtil.nextInt(random(), 1, 10); + int fixedLength = TestUtil.nextInt(random(), 1, 10); doTestSortedVsFieldCache(fixedLength, fixedLength); } } @@ -2082,13 +2082,13 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { if (minLength == maxLength) { length = minLength; // fixed length } else { - length = _TestUtil.nextInt(random(), minLength, maxLength); + length = TestUtil.nextInt(random(), minLength, maxLength); } - int numValues = _TestUtil.nextInt(random(), 0, maxValuesPerDoc); + int numValues = TestUtil.nextInt(random(), 0, maxValuesPerDoc); // create a random set of strings Set values = new TreeSet(); for (int v = 0; v < numValues; v++) { - values.add(_TestUtil.randomSimpleString(random(), length)); + values.add(TestUtil.randomSimpleString(random(), length)); } // add ordered to the stored field @@ -2146,7 +2146,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet()); int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - int fixedLength = _TestUtil.nextInt(random(), 1, 10); + int fixedLength = TestUtil.nextInt(random(), 1, 10); doTestSortedSetVsStoredFields(fixedLength, fixedLength, 16); 
} } @@ -2163,7 +2163,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet()); int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - int fixedLength = _TestUtil.nextInt(random(), 1, 10); + int fixedLength = TestUtil.nextInt(random(), 1, 10); doTestSortedSetVsStoredFields(fixedLength, fixedLength, 1); } } @@ -2256,7 +2256,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { // random seekExact(ord) for (long i = 0; i < numOrds; i++) { - long randomOrd = _TestUtil.nextLong(random(), 0, numOrds-1); + long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1); expected.seekExact(randomOrd); actual.seekExact(randomOrd); assertEquals(expected.ord(), actual.ord()); @@ -2265,7 +2265,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { // random seekExact(BytesRef) for (long i = 0; i < numOrds; i++) { - long randomOrd = _TestUtil.nextLong(random(), 0, numOrds-1); + long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1); expected.seekExact(randomOrd); actual.seekExact(expected.term()); assertEquals(expected.ord(), actual.ord()); @@ -2274,7 +2274,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { // random seekCeil(BytesRef) for (long i = 0; i < numOrds; i++) { - BytesRef target = new BytesRef(_TestUtil.randomUnicodeString(random())); + BytesRef target = new BytesRef(TestUtil.randomUnicodeString(random())); SeekStatus expectedStatus = expected.seekCeil(target); assertEquals(expectedStatus, actual.seekCeil(target)); if (expectedStatus != SeekStatus.END) { @@ -2299,13 +2299,13 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { if (minLength == maxLength) { length = minLength; // fixed length } else { - length = _TestUtil.nextInt(random(), minLength, maxLength); + length = TestUtil.nextInt(random(), minLength, maxLength); } int numValues = 
random().nextInt(17); // create a random list of strings List values = new ArrayList(); for (int v = 0; v < numValues; v++) { - values.add(_TestUtil.randomSimpleString(random(), length)); + values.add(TestUtil.randomSimpleString(random(), length)); } // add in any order to the indexed field @@ -2363,7 +2363,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet()); int numIterations = atLeast(1); for (int i = 0; i < numIterations; i++) { - int fixedLength = _TestUtil.nextInt(random(), 1, 10); + int fixedLength = TestUtil.nextInt(random(), 1, 10); doTestSortedSetVsUninvertedField(fixedLength, fixedLength); } } @@ -2615,17 +2615,17 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { Analyzer analyzer = new MockAnalyzer(random()); // FSDirectory because SimpleText will consume gobbs of // space when storing big binary values: - Directory d = newFSDirectory(_TestUtil.getTempDir("hugeBinaryValues")); + Directory d = newFSDirectory(TestUtil.getTempDir("hugeBinaryValues")); boolean doFixed = random().nextBoolean(); int numDocs; int fixedLength = 0; if (doFixed) { // Sometimes make all values fixed length since some // codecs have different code paths for this: - numDocs = _TestUtil.nextInt(random(), 10, 20); - fixedLength = _TestUtil.nextInt(random(), 65537, 256*1024); + numDocs = TestUtil.nextInt(random(), 10, 20); + fixedLength = TestUtil.nextInt(random(), 65537, 256 * 1024); } else { - numDocs = _TestUtil.nextInt(random(), 100, 200); + numDocs = TestUtil.nextInt(random(), 100, 200); } IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); List docBytes = new ArrayList(); @@ -2640,9 +2640,9 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { if (doFixed) { numBytes = fixedLength; } else if (docID == 0 || random().nextInt(5) == 3) { - numBytes = _TestUtil.nextInt(random(), 65537, 
3*1024*1024); + numBytes = TestUtil.nextInt(random(), 65537, 3 * 1024 * 1024); } else { - numBytes = _TestUtil.nextInt(random(), 1, 1024*1024); + numBytes = TestUtil.nextInt(random(), 1, 1024 * 1024); } totalBytes += numBytes; if (totalBytes > 5 * 1024*1024) { @@ -2713,17 +2713,17 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { Analyzer analyzer = new MockAnalyzer(random()); // FSDirectory because SimpleText will consume gobbs of // space when storing big binary values: - Directory d = newFSDirectory(_TestUtil.getTempDir("hugeBinaryValues")); + Directory d = newFSDirectory(TestUtil.getTempDir("hugeBinaryValues")); boolean doFixed = random().nextBoolean(); int numDocs; int fixedLength = 0; if (doFixed) { // Sometimes make all values fixed length since some // codecs have different code paths for this: - numDocs = _TestUtil.nextInt(random(), 10, 20); + numDocs = TestUtil.nextInt(random(), 10, 20); fixedLength = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH; } else { - numDocs = _TestUtil.nextInt(random(), 100, 200); + numDocs = TestUtil.nextInt(random(), 100, 200); } IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); List docBytes = new ArrayList(); @@ -2740,7 +2740,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { } else if (docID == 0 || random().nextInt(5) == 3) { numBytes = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH; } else { - numBytes = _TestUtil.nextInt(random(), 1, Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH); + numBytes = TestUtil.nextInt(random(), 1, Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH); } totalBytes += numBytes; if (totalBytes > 5 * 1024*1024) { @@ -2799,7 +2799,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { int numDocs = atLeast(300); for (int i = 0; i < numDocs; i++) { idField.setStringValue(Integer.toString(i)); - int length = _TestUtil.nextInt(random(), 0, 8); + int length = 
TestUtil.nextInt(random(), 0, 8); byte buffer[] = new byte[length]; random().nextBytes(buffer); storedBinField.setBytesValue(buffer); @@ -2824,7 +2824,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { // compare final DirectoryReader ir = DirectoryReader.open(dir); - int numThreads = _TestUtil.nextInt(random(), 2, 7); + int numThreads = TestUtil.nextInt(random(), 2, 7); Thread threads[] = new Thread[numThreads]; final CountDownLatch startingGun = new CountDownLatch(1); @@ -2850,7 +2850,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { assertEquals(Long.parseLong(expected), numerics.get(j)); } } - _TestUtil.checkReader(ir); + TestUtil.checkReader(ir); } catch (Exception e) { throw new RuntimeException(e); } @@ -2884,7 +2884,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { int numDocs = atLeast(300); for (int i = 0; i < numDocs; i++) { idField.setStringValue(Integer.toString(i)); - int length = _TestUtil.nextInt(random(), 0, 8); + int length = TestUtil.nextInt(random(), 0, 8); byte buffer[] = new byte[length]; random().nextBytes(buffer); storedBinField.setBytesValue(buffer); @@ -2907,7 +2907,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { int numSortedSetFields = random().nextInt(3); Set values = new TreeSet(); for (int j = 0; j < numSortedSetFields; j++) { - values.add(_TestUtil.randomSimpleString(random())); + values.add(TestUtil.randomSimpleString(random())); } for (String v : values) { doc.add(new SortedSetDocValuesField("dvSortedSet", new BytesRef(v))); @@ -2929,7 +2929,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { // compare final DirectoryReader ir = DirectoryReader.open(dir); - int numThreads = _TestUtil.nextInt(random(), 2, 7); + int numThreads = TestUtil.nextInt(random(), 2, 7); Thread threads[] = new Thread[numThreads]; final CountDownLatch startingGun = new CountDownLatch(1); @@ -2997,7 +2997,7 @@ 
public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { } } } - _TestUtil.checkReader(ir); + TestUtil.checkReader(ir); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java index 8938be6de09..d48d5e12ace 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -57,7 +56,7 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.RamUsageEstimator; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -149,12 +148,12 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { public SeedPostings(long seed, int minDocFreq, int maxDocFreq, Bits liveDocs, IndexOptions options, boolean allowPayloads) { random = new Random(seed); docRandom = new Random(random.nextLong()); - docFreq = _TestUtil.nextInt(random, minDocFreq, maxDocFreq); + docFreq = TestUtil.nextInt(random, minDocFreq, maxDocFreq); this.liveDocs = liveDocs; this.allowPayloads = allowPayloads; // TODO: more realistic to inversely tie this to numDocs: - maxDocSpacing = _TestUtil.nextInt(random, 1, 100); + maxDocSpacing = TestUtil.nextInt(random, 1, 100); if (random.nextInt(10) == 7) { // 10% of the time create big payloads: @@ -193,21 +192,21 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { docID++; } 
else { // TODO: sometimes have a biggish gap here! - docID += _TestUtil.nextInt(docRandom, 1, maxDocSpacing); + docID += TestUtil.nextInt(docRandom, 1, maxDocSpacing); } if (random.nextInt(200) == 17) { - freq = _TestUtil.nextInt(random, 1, 1000); + freq = TestUtil.nextInt(random, 1, 1000); } else if (random.nextInt(10) == 17) { - freq = _TestUtil.nextInt(random, 1, 20); + freq = TestUtil.nextInt(random, 1, 20); } else { - freq = _TestUtil.nextInt(random, 1, 4); + freq = TestUtil.nextInt(random, 1, 4); } pos = 0; offset = 0; posUpto = 0; - posSpacing = _TestUtil.nextInt(random, 1, 100); + posSpacing = TestUtil.nextInt(random, 1, 100); upto++; return docID; @@ -239,7 +238,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { } else if (posSpacing == 1) { pos++; } else { - pos += _TestUtil.nextInt(random, 1, posSpacing); + pos += TestUtil.nextInt(random, 1, posSpacing); } if (payloadSize != 0) { @@ -344,7 +343,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { totalPayloadBytes = 0; fields = new TreeMap>(); - final int numFields = _TestUtil.nextInt(random(), 1, 5); + final int numFields = TestUtil.nextInt(random(), 1, 5); if (VERBOSE) { System.out.println("TEST: " + numFields + " fields"); } @@ -353,7 +352,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { FieldInfo[] fieldInfoArray = new FieldInfo[numFields]; int fieldUpto = 0; while (fieldUpto < numFields) { - String field = _TestUtil.randomSimpleString(random()); + String field = TestUtil.randomSimpleString(random()); if (fields.containsKey(field)) { continue; } @@ -371,11 +370,11 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { if (random().nextInt(10) == 7) { numTerms = atLeast(50); } else { - numTerms = _TestUtil.nextInt(random(), 2, 20); + numTerms = TestUtil.nextInt(random(), 2, 20); } for(int termUpto=0;termUpto 0) { targetDocID = expected.docID() + skipDocIDs; expected.advance(targetDocID); @@ 
-1083,7 +1082,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { final boolean alwaysTestMax) throws Exception { if (options.contains(Option.THREADS)) { - int numThreads = _TestUtil.nextInt(random(), 2, 5); + int numThreads = TestUtil.nextInt(random(), 2, 5); Thread[] threads = new Thread[numThreads]; for(int threadUpto=0;threadUpto fieldIDs = new ArrayList(); @@ -129,7 +130,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends LuceneTestCase { for(int field: fieldIDs) { final String s; if (rand.nextInt(4) != 3) { - s = _TestUtil.randomUnicodeString(rand, 1000); + s = TestUtil.randomUnicodeString(rand, 1000); doc.add(newField("f"+field, s, customType2)); } else { s = null; @@ -349,7 +350,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends LuceneTestCase { ft.setStored(true); ft.freeze(); - final String string = _TestUtil.randomSimpleString(random(), 50); + final String string = TestUtil.randomSimpleString(random(), 50); final byte[] bytes = string.getBytes("UTF-8"); final long l = random().nextBoolean() ? random().nextInt(42) : random().nextLong(); final int i = random().nextBoolean() ? random().nextInt(42) : random().nextInt(); @@ -597,7 +598,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends LuceneTestCase { // for this test we force a FS dir // we can't just use newFSDirectory, because this test doesn't really index anything. 
// so if we get NRTCachingDir+SimpleText, we make massive stored fields and OOM (LUCENE-4484) - Directory dir = new MockDirectoryWrapper(random(), new MMapDirectory(_TestUtil.getTempDir("testBigDocuments"))); + Directory dir = new MockDirectoryWrapper(random(), new MMapDirectory(TestUtil.getTempDir("testBigDocuments"))); IndexWriterConfig iwConf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30)); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java index d170a0e976c..5b0bdd02abc 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java @@ -48,7 +48,7 @@ import org.apache.lucene.util.AttributeImpl; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import com.carrotsearch.randomizedtesting.generators.RandomPicks; @@ -216,17 +216,17 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { final int o = random().nextInt(sampleTerms.length); terms[i] = sampleTerms[o]; termBytes[i] = sampleTermBytes[o]; - positionsIncrements[i] = _TestUtil.nextInt(random(), i == 0 ? 1 : 0, 10); + positionsIncrements[i] = TestUtil.nextInt(random(), i == 0 ? 
1 : 0, 10); if (offsetsGoBackwards) { startOffsets[i] = random().nextInt(); endOffsets[i] = random().nextInt(); } else { if (i == 0) { - startOffsets[i] = _TestUtil.nextInt(random(), 0, 1 << 16); + startOffsets[i] = TestUtil.nextInt(random(), 0, 1 << 16); } else { - startOffsets[i] = startOffsets[i-1] + _TestUtil.nextInt(random(), 0, rarely() ? 1 << 16 : 20); + startOffsets[i] = startOffsets[i-1] + TestUtil.nextInt(random(), 0, rarely() ? 1 << 16 : 20); } - endOffsets[i] = startOffsets[i] + _TestUtil.nextInt(random(), 0, rarely() ? 1 << 10 : 20); + endOffsets[i] = startOffsets[i] + TestUtil.nextInt(random(), 0, rarely() ? 1 << 10 : 20); } } @@ -320,7 +320,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { this.fieldNames[i] = RandomPicks.randomFrom(random(), fieldNames); } while (usedFileNames.contains(this.fieldNames[i])); usedFileNames.add(this.fieldNames[i]); - tokenStreams[i] = new RandomTokenStream(_TestUtil.nextInt(random(), 1, maxTermCount), sampleTerms, sampleTermBytes); + tokenStreams[i] = new RandomTokenStream(TestUtil.nextInt(random(), 1, maxTermCount), sampleTerms, sampleTermBytes); } } @@ -343,14 +343,14 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { protected RandomDocumentFactory(int distinctFieldNames, int disctinctTerms) { final Set fieldNames = new HashSet(); while (fieldNames.size() < distinctFieldNames) { - fieldNames.add(_TestUtil.randomSimpleString(random())); + fieldNames.add(TestUtil.randomSimpleString(random())); fieldNames.remove("id"); } this.fieldNames = fieldNames.toArray(new String[0]); terms = new String[disctinctTerms]; termBytes = new BytesRef[disctinctTerms]; for (int i = 0; i < disctinctTerms; ++i) { - terms[i] = _TestUtil.randomRealisticUnicodeString(random()); + terms[i] = TestUtil.randomRealisticUnicodeString(random()); termBytes[i] = new BytesRef(terms[i]); } } @@ -525,7 +525,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { final 
Document emptyDoc = new Document(); final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); - final RandomDocument doc = docFactory.newDocument(_TestUtil.nextInt(random(), 1, 3), 20, options); + final RandomDocument doc = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), 20, options); for (int i = 0; i < numDocs; ++i) { if (i == docWithVectors) { writer.addDocument(addId(doc.toDocument(), "42")); @@ -560,7 +560,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { } final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); - final RandomDocument doc = docFactory.newDocument(_TestUtil.nextInt(random(), 1, 2), atLeast(20000), options); + final RandomDocument doc = docFactory.newDocument(TestUtil.nextInt(random(), 1, 2), atLeast(20000), options); writer.addDocument(doc.toDocument()); final IndexReader reader = writer.getReader(); assertEquals(doc, reader.getTermVectors(0)); @@ -587,7 +587,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { // different options for the same field public void testMixedOptions() throws IOException { - final int numFields = _TestUtil.nextInt(random(), 1, 3); + final int numFields = TestUtil.nextInt(random(), 1, 3); final RandomDocumentFactory docFactory = new RandomDocumentFactory(numFields, 10); for (Options options1 : validOptions()) { for (Options options2 : validOptions()) { @@ -617,7 +617,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { final int numDocs = atLeast(100); final RandomDocument[] docs = new RandomDocument[numDocs]; for (int i = 0; i < numDocs; ++i) { - docs[i] = docFactory.newDocument(_TestUtil.nextInt(random(), 1, 3), _TestUtil.nextInt(random(), 10, 50), randomOptions()); + docs[i] = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), TestUtil.nextInt(random(), 10, 50), randomOptions()); } final Directory dir = 
newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); @@ -645,7 +645,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { for (Options options : validOptions()) { final RandomDocument[] docs = new RandomDocument[numDocs]; for (int i = 0; i < numDocs; ++i) { - docs[i] = docFactory.newDocument(_TestUtil.nextInt(random(), 1, 3), atLeast(10), options); + docs[i] = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), atLeast(10), options); } final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); @@ -681,7 +681,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { for (Options options : validOptions()) { final RandomDocument[] docs = new RandomDocument[numDocs]; for (int i = 0; i < numDocs; ++i) { - docs[i] = docFactory.newDocument(_TestUtil.nextInt(random(), 1, 3), atLeast(10), options); + docs[i] = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), atLeast(10), options); } final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random(), dir); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java index 0459de2365b..d4ec400c89e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java @@ -25,8 +25,7 @@ import java.util.List; import java.util.Map; import java.util.Random; -import org.apache.lucene.index.MergePolicy.MergeTrigger; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * MergePolicy that makes random decisions for testing. @@ -64,7 +63,7 @@ public class MockRandomMergePolicy extends MergePolicy { // TODO: sometimes make more than 1 merge? 
mergeSpec = new MergeSpecification(); - final int segsToMerge = _TestUtil.nextInt(random, 1, numSegments); + final int segsToMerge = TestUtil.nextInt(random, 1, numSegments); mergeSpec.add(new OneMerge(segments.subList(0, segsToMerge))); } @@ -93,7 +92,7 @@ public class MockRandomMergePolicy extends MergePolicy { int upto = 0; while(upto < eligibleSegments.size()) { int max = Math.min(10, eligibleSegments.size()-upto); - int inc = max <= 2 ? max : _TestUtil.nextInt(random, 2, max); + int inc = max <= 2 ? max : TestUtil.nextInt(random, 2, max); mergeSpec.add(new OneMerge(eligibleSegments.subList(upto, upto+inc))); upto += inc; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java index 410b502642e..5e3911fa373 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java @@ -55,7 +55,7 @@ import org.apache.lucene.codecs.pulsing.Pulsing41PostingsFormat; import org.apache.lucene.codecs.simpletext.SimpleTextDocValuesFormat; import org.apache.lucene.codecs.simpletext.SimpleTextPostingsFormat; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Codec that assigns per-field random postings formats. 
@@ -123,9 +123,9 @@ public class RandomCodec extends Lucene46Codec { this.perFieldSeed = random.nextInt(); // TODO: make it possible to specify min/max iterms per // block via CL: - int minItemsPerBlock = _TestUtil.nextInt(random, 2, 100); + int minItemsPerBlock = TestUtil.nextInt(random, 2, 100); int maxItemsPerBlock = 2*(Math.max(2, minItemsPerBlock-1)) + random.nextInt(100); - int lowFreqCutoff = _TestUtil.nextInt(random, 2, 100); + int lowFreqCutoff = TestUtil.nextInt(random, 2, 100); add(avoidCodecs, new Lucene41PostingsFormat(minItemsPerBlock, maxItemsPerBlock), @@ -143,13 +143,13 @@ public class RandomCodec extends Lucene46Codec { //with such "wrapper" classes? new TestBloomFilteredLucene41Postings(), new MockSepPostingsFormat(), - new MockFixedIntBlockPostingsFormat(_TestUtil.nextInt(random, 1, 2000)), - new MockVariableIntBlockPostingsFormat( _TestUtil.nextInt(random, 1, 127)), + new MockFixedIntBlockPostingsFormat(TestUtil.nextInt(random, 1, 2000)), + new MockVariableIntBlockPostingsFormat( TestUtil.nextInt(random, 1, 127)), new MockRandomPostingsFormat(random), new NestedPulsingPostingsFormat(), - new Lucene41WithOrds(_TestUtil.nextInt(random, 1, 1000)), - new Lucene41VarGapFixedInterval(_TestUtil.nextInt(random, 1, 1000)), - new Lucene41VarGapDocFreqInterval(_TestUtil.nextInt(random, 1, 100), _TestUtil.nextInt(random, 1, 1000)), + new Lucene41WithOrds(TestUtil.nextInt(random, 1, 1000)), + new Lucene41VarGapFixedInterval(TestUtil.nextInt(random, 1, 1000)), + new Lucene41VarGapDocFreqInterval(TestUtil.nextInt(random, 1, 100), TestUtil.nextInt(random, 1, 1000)), new SimpleTextPostingsFormat(), new AssertingPostingsFormat(), new MemoryPostingsFormat(true, random.nextFloat()), diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java index 1eced5f5727..4790fa127d6 100644 --- 
a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java @@ -25,14 +25,13 @@ import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.index.IndexWriter; // javadoc import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NullInfoStream; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; -import org.apache.lucene.util._TestUtil; /** Silly class that randomizes the indexing experience. EG * it may swap in a different merge policy/scheduler; may @@ -88,7 +87,7 @@ public class RandomIndexWriter implements Closeable { // TODO: this should be solved in a different way; Random should not be shared (!). 
this.r = new Random(r.nextLong()); w = mockIndexWriter(dir, c, r); - flushAt = _TestUtil.nextInt(r, 10, 1000); + flushAt = TestUtil.nextInt(r, 10, 1000); codec = w.getConfig().getCodec(); if (LuceneTestCase.VERBOSE) { System.out.println("RIW dir=" + dir + " config=" + w.getConfig()); @@ -155,7 +154,7 @@ public class RandomIndexWriter implements Closeable { System.out.println("RIW.add/updateDocument: now doing a commit at docCount=" + docCount); } w.commit(); - flushAt += _TestUtil.nextInt(r, (int) (flushAtFactor * 10), (int) (flushAtFactor * 1000)); + flushAt += TestUtil.nextInt(r, (int) (flushAtFactor * 10), (int) (flushAtFactor * 1000)); if (flushAtFactor < 2e6) { // gradually but exponentially increase time b/w flushes flushAtFactor *= 1.05; @@ -283,7 +282,7 @@ public class RandomIndexWriter implements Closeable { w.forceMerge(1); } else { // partial forceMerge - final int limit = _TestUtil.nextInt(r, 1, segCount); + final int limit = TestUtil.nextInt(r, 1, segCount); if (LuceneTestCase.VERBOSE) { System.out.println("RIW: doRandomForceMerge(" + limit + ")"); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java index 2c5d792e828..739d45a631c 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java @@ -39,7 +39,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FailOnNonBulkMergesInfoStream; @@ -47,7 +46,7 @@ import org.apache.lucene.util.LineFileDocs; import 
org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NamedThreadFactory; import org.apache.lucene.util.PrintStreamInfoStream; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; // TODO // - mix in forceMerge, addIndexes @@ -137,12 +136,12 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": now long sleep"); } - Thread.sleep(_TestUtil.nextInt(random(), 50, 500)); + Thread.sleep(TestUtil.nextInt(random(), 50, 500)); } // Rate limit ingest rate: if (random().nextInt(7) == 5) { - Thread.sleep(_TestUtil.nextInt(random(), 1, 10)); + Thread.sleep(TestUtil.nextInt(random(), 1, 10)); if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": done sleep"); } @@ -187,16 +186,16 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas allSubDocs.add(subDocs); doc.add(packIDField); - docsList.add(_TestUtil.cloneDocument(doc)); + docsList.add(TestUtil.cloneDocument(doc)); docIDs.add(doc.get("docid")); - final int maxDocCount = _TestUtil.nextInt(random(), 1, 10); + final int maxDocCount = TestUtil.nextInt(random(), 1, 10); while(docsList.size() < maxDocCount) { doc = docs.nextDoc(); if (doc == null) { break; } - docsList.add(_TestUtil.cloneDocument(doc)); + docsList.add(TestUtil.cloneDocument(doc)); docIDs.add(doc.get("docid")); } addCount.addAndGet(docsList.size()); @@ -317,7 +316,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } protected void runSearchThreads(final long stopTimeMS) throws Exception { - final int numThreads = _TestUtil.nextInt(random(), 1, 5); + final int numThreads = TestUtil.nextInt(random(), 1, 5); final Thread[] searchThreads = new Thread[numThreads]; final AtomicInteger totHits = new AtomicInteger(); @@ -436,7 +435,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas Random random = new 
Random(random().nextLong()); final LineFileDocs docs = new LineFileDocs(random, true); - final File tempDir = _TestUtil.getTempDir(testName); + final File tempDir = TestUtil.getTempDir(testName); dir = getDirectory(newMockFSDirectory(tempDir)); // some subclasses rely on this being MDW if (dir instanceof BaseDirectoryWrapper) { ((BaseDirectoryWrapper) dir).setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves. @@ -497,13 +496,13 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas }); } writer = new IndexWriter(dir, conf); - _TestUtil.reduceOpenFiles(writer); + TestUtil.reduceOpenFiles(writer); final ExecutorService es = random().nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory(testName)); doAfterWriter(es); - final int NUM_INDEX_THREADS = _TestUtil.nextInt(random(), 2, 4); + final int NUM_INDEX_THREADS = TestUtil.nextInt(random(), 2, 4); final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : RANDOM_MULTIPLIER; @@ -642,9 +641,9 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas es.awaitTermination(1, TimeUnit.SECONDS); } - _TestUtil.checkIndex(dir); + TestUtil.checkIndex(dir); dir.close(); - _TestUtil.rmDir(tempDir); + TestUtil.rmDir(tempDir); if (VERBOSE) { System.out.println("TEST: done [" + (System.currentTimeMillis()-t0) + " ms]"); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java index a5c30a7d41d..eef42b0984d 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java @@ -25,7 +25,7 @@ import java.util.concurrent.ExecutorService; import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; -import 
org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Helper class that adds some extra checks to ensure correct @@ -85,7 +85,7 @@ public class AssertingIndexSearcher extends IndexSearcher { protected Query wrapFilter(Query query, Filter filter) { if (random.nextBoolean()) return super.wrapFilter(query, filter); - return (filter == null) ? query : new FilteredQuery(query, filter, _TestUtil.randomFilterStrategy(random)); + return (filter == null) ? query : new FilteredQuery(query, filter, TestUtil.randomFilterStrategy(random)); } @Override diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java index 68d33928c7a..77107206e74 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java @@ -31,9 +31,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.BasicAutomata; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.junit.AfterClass; @@ -125,7 +124,7 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { * returns random character (a-z) */ static char randomChar() { - return (char) _TestUtil.nextInt(random(), 'a', 'z'); + return (char) TestUtil.nextInt(random(), 'a', 'z'); } /** @@ -173,8 +172,8 @@ public abstract class SearchEquivalenceTestBase extends LuceneTestCase { protected void assertSubsetOf(Query q1, Query q2, Filter filter) throws Exception { // TRUNK ONLY: test both filter code paths if (filter != null 
&& random().nextBoolean()) { - q1 = new FilteredQuery(q1, filter, _TestUtil.randomFilterStrategy(random())); - q2 = new FilteredQuery(q2, filter, _TestUtil.randomFilterStrategy(random())); + q1 = new FilteredQuery(q1, filter, TestUtil.randomFilterStrategy(random())); + q2 = new FilteredQuery(q2, filter, TestUtil.randomFilterStrategy(random())); filter = null; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java index 4b21b6d6226..2b0a6130caa 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java @@ -36,7 +36,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.PrintStreamInfoStream; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; // TODO // - doc blocks? so we can test joins/grouping... 
@@ -447,7 +447,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { public NodeState(Random random, int nodeID, int numNodes) throws IOException { myNodeID = nodeID; - dir = newFSDirectory(_TestUtil.getTempDir("ShardSearchingTestBase")); + dir = newFSDirectory(TestUtil.getTempDir("ShardSearchingTestBase")); // TODO: set warmer IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE); diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java index 2791f246549..87286da9a3e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java @@ -20,7 +20,7 @@ package org.apache.lucene.store; import java.io.IOException; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; /** * Calls check index on close. 
@@ -42,7 +42,7 @@ public class BaseDirectoryWrapper extends FilterDirectory { public void close() throws IOException { isOpen = false; if (checkIndexOnClose && DirectoryReader.indexExists(this)) { - _TestUtil.checkIndex(this, crossCheckTermVectorsOnClose); + TestUtil.checkIndex(this, crossCheckTermVectorsOnClose); } super.close(); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java index 0ee23000833..e8eb0290d24 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java @@ -42,8 +42,8 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoDeletionPolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.ThrottledIndexOutput; -import org.apache.lucene.util._TestUtil; /** * This is a Directory Wrapper that adds methods @@ -661,7 +661,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { if (LuceneTestCase.VERBOSE) { System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex"); } - _TestUtil.checkIndex(this, getCrossCheckTermVectorsOnClose()); + TestUtil.checkIndex(this, getCrossCheckTermVectorsOnClose()); // TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles if (assertNoUnreferencedFilesOnClose) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java index 1d5406af54d..a92d275eea8 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/BaseDocIdSetTestCase.java @@ -86,7 +86,7 @@ public abstract class BaseDocIdSetTestCase extends 
LuceneTes /** Compare the content of the set against a {@link BitSet}. */ public void testAgainstBitSet() throws IOException { - final int numBits = _TestUtil.nextInt(random(), 100, 1 << 20); + final int numBits = TestUtil.nextInt(random(), 100, 1 << 20); // test various random sets with various load factors for (float percentSet : new float[] {0f, 0.0001f, random().nextFloat() / 2, 0.9f, 1f}) { final BitSet set = randomSet(numBits, percentSet); @@ -103,7 +103,7 @@ public abstract class BaseDocIdSetTestCase extends LuceneTes copy = copyOf(set, numBits); // then random index assertEquals(numBits, set, copy); // test regular increments - for (int inc = 2; inc < 1000; inc += _TestUtil.nextInt(random(), 1, 100)) { + for (int inc = 2; inc < 1000; inc += TestUtil.nextInt(random(), 1, 100)) { set = new BitSet(numBits); for (int d = random().nextInt(10); d < numBits; d += inc) { set.set(d); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/CloseableFile.java b/lucene/test-framework/src/java/org/apache/lucene/util/CloseableFile.java index 7aedb3fec35..33737b10189 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/CloseableFile.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/CloseableFile.java @@ -37,7 +37,7 @@ final class CloseableFile implements Closeable { if (failureMarker.wasSuccessful()) { if (file.exists()) { try { - _TestUtil.rmDir(file); + TestUtil.rmDir(file); } catch (IOException e) { // Ignore the exception from rmDir. 
} diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java index b37d461b6b4..2ea67e5058b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java @@ -638,7 +638,7 @@ public abstract class LuceneTestCase extends Assert { public static int atLeast(Random random, int i) { int min = (TEST_NIGHTLY ? 2*i : i) * RANDOM_MULTIPLIER; int max = min+(min/2); - return _TestUtil.nextInt(random, min, max); + return TestUtil.nextInt(random, min, max); } public static int atLeast(int i) { @@ -744,8 +744,8 @@ public abstract class LuceneTestCase extends Assert { if (r.nextBoolean()) { c.setMergeScheduler(new SerialMergeScheduler()); } else if (rarely(r)) { - int maxThreadCount = _TestUtil.nextInt(random(), 1, 4); - int maxMergeCount = _TestUtil.nextInt(random(), maxThreadCount, maxThreadCount+4); + int maxThreadCount = TestUtil.nextInt(random(), 1, 4); + int maxMergeCount = TestUtil.nextInt(random(), maxThreadCount, maxThreadCount + 4); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount); c.setMergeScheduler(cms); @@ -753,15 +753,15 @@ public abstract class LuceneTestCase extends Assert { if (r.nextBoolean()) { if (rarely(r)) { // crazy value - c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 15)); + c.setMaxBufferedDocs(TestUtil.nextInt(r, 2, 15)); } else { // reasonable value - c.setMaxBufferedDocs(_TestUtil.nextInt(r, 16, 1000)); + c.setMaxBufferedDocs(TestUtil.nextInt(r, 16, 1000)); } } if (r.nextBoolean()) { - int maxNumThreadStates = rarely(r) ? _TestUtil.nextInt(r, 5, 20) // crazy value - : _TestUtil.nextInt(r, 1, 4); // reasonable value + int maxNumThreadStates = rarely(r) ? 
TestUtil.nextInt(r, 5, 20) // crazy value + : TestUtil.nextInt(r, 1, 4); // reasonable value Method setIndexerThreadPoolMethod = null; try { @@ -844,9 +844,9 @@ public abstract class LuceneTestCase extends Assert { LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy(); logmp.setCalibrateSizeByDeletes(r.nextBoolean()); if (rarely(r)) { - logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 9)); + logmp.setMergeFactor(TestUtil.nextInt(r, 2, 9)); } else { - logmp.setMergeFactor(_TestUtil.nextInt(r, 10, 50)); + logmp.setMergeFactor(TestUtil.nextInt(r, 10, 50)); } configureRandom(r, logmp); return logmp; @@ -869,11 +869,11 @@ public abstract class LuceneTestCase extends Assert { public static TieredMergePolicy newTieredMergePolicy(Random r) { TieredMergePolicy tmp = new TieredMergePolicy(); if (rarely(r)) { - tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 9)); - tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 9)); + tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 2, 9)); + tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 2, 9)); } else { - tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 10, 50)); - tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 10, 50)); + tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 10, 50)); + tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 10, 50)); } if (rarely(r)) { tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0); @@ -883,9 +883,9 @@ public abstract class LuceneTestCase extends Assert { tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0); tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0); if (rarely(r)) { - tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 2, 20)); + tmp.setSegmentsPerTier(TestUtil.nextInt(r, 2, 20)); } else { - tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 10, 50)); + tmp.setSegmentsPerTier(TestUtil.nextInt(r, 10, 50)); } configureRandom(r, tmp); tmp.setReclaimDeletesWeight(r.nextDouble()*4); @@ -1160,7 +1160,7 @@ public abstract class LuceneTestCase extends Assert { final Class clazz = 
CommandLineUtil.loadDirectoryClass(clazzName); // If it is a FSDirectory type, try its ctor(File) if (FSDirectory.class.isAssignableFrom(clazz)) { - final File dir = _TestUtil.getTempDir("index"); + final File dir = TestUtil.getTempDir("index"); dir.mkdirs(); // ensure it's created so we 'have' it. return newFSDirectoryImpl(clazz.asSubclass(FSDirectory.class), dir); } @@ -1256,7 +1256,7 @@ public abstract class LuceneTestCase extends Assert { } else if (oldContext.mergeInfo != null) { // Always return at least the estimatedMergeBytes of // the incoming IOContext: - return new IOContext(new MergeInfo(randomNumDocs, Math.max(oldContext.mergeInfo.estimatedMergeBytes, size), random.nextBoolean(), _TestUtil.nextInt(random, 1, 100))); + return new IOContext(new MergeInfo(randomNumDocs, Math.max(oldContext.mergeInfo.estimatedMergeBytes, size), random.nextBoolean(), TestUtil.nextInt(random, 1, 100))); } else { // Make a totally random IOContext: final IOContext context; @@ -1312,7 +1312,7 @@ public abstract class LuceneTestCase extends Assert { // TODO: not useful to check DirectoryReader (redundant with checkindex) // but maybe sometimes run this on the other crazy readers maybeWrapReader creates? 
try { - _TestUtil.checkReader(r); + TestUtil.checkReader(r); } catch (IOException e) { throw new AssertionError(e); } @@ -1326,7 +1326,7 @@ public abstract class LuceneTestCase extends Assert { if (random.nextBoolean()) { ex = null; } else { - threads = _TestUtil.nextInt(random, 1, 8); + threads = TestUtil.nextInt(random, 1, 8); ex = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("LuceneTestCase")); @@ -1340,7 +1340,7 @@ public abstract class LuceneTestCase extends Assert { r.addReaderClosedListener(new ReaderClosedListener() { @Override public void onClose(IndexReader reader) { - _TestUtil.shutdownExecutorService(ex); + TestUtil.shutdownExecutorService(ex); } }); } @@ -1763,7 +1763,7 @@ public abstract class LuceneTestCase extends Assert { tests.add(new BytesRef(new byte[] {(byte) 0xFF, (byte) 0xFF})); // past the last term break; case 2: - tests.add(new BytesRef(_TestUtil.randomSimpleString(random()))); // random term + tests.add(new BytesRef(TestUtil.randomSimpleString(random()))); // random term break; default: throw new AssertionError(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java similarity index 97% rename from lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java rename to lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java index bcd0115d804..0f4337a5c2e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java @@ -69,7 +69,6 @@ import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo.DocValuesType; -import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexReader; import 
org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexableField; @@ -77,8 +76,6 @@ import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.MultiFields; -import org.apache.lucene.index.SegmentCommitInfo; -import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.TieredMergePolicy; @@ -97,7 +94,10 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; /** * General utility methods for Lucene unit tests. */ -public class _TestUtil { +public final class TestUtil { + private TestUtil() { + // + } // the max number of retries we're going to do in getTempDir private static final int GET_TEMP_DIR_RETRY_THRESHOLD = 1000; @@ -297,7 +297,7 @@ public class _TestUtil { } final char[] buffer = new char[end]; for (int i = 0; i < end; i++) { - buffer[i] = (char) _TestUtil.nextInt(r, 'a', 'z'); + buffer[i] = (char) TestUtil.nextInt(r, 'a', 'z'); } return new String(buffer, 0, end); } @@ -310,7 +310,7 @@ public class _TestUtil { } final char[] buffer = new char[end]; for (int i = 0; i < end; i++) { - buffer[i] = (char) _TestUtil.nextInt(r, minChar, maxChar); + buffer[i] = (char) TestUtil.nextInt(r, minChar, maxChar); } return new String(buffer, 0, end); } @@ -1006,7 +1006,7 @@ public class _TestUtil { final String nonBmpString = "AB\uD840\uDC00C"; while (true) { try { - Pattern p = Pattern.compile(_TestUtil.randomRegexpishString(random)); + Pattern p = Pattern.compile(TestUtil.randomRegexpishString(random)); String replacement = null; // ignore bugs in Sun's regex impl try { @@ -1080,7 +1080,7 @@ public class _TestUtil { // otherwise, try to make it more realistic with 'words' since most tests use MockTokenizer // first decide how big the string will really be: 0..n maxLength = random.nextInt(maxLength); - int avgWordLength = 
_TestUtil.nextInt(random, 3, 8); + int avgWordLength = TestUtil.nextInt(random, 3, 8); StringBuilder sb = new StringBuilder(); while (sb.length() < maxLength) { if (sb.length() > 0) { @@ -1101,25 +1101,25 @@ public class _TestUtil { return ""; } - int evilness = _TestUtil.nextInt(random, 0, 20); + int evilness = TestUtil.nextInt(random, 0, 20); StringBuilder sb = new StringBuilder(); while (sb.length() < wordLength) {; if (simple) { - sb.append(random.nextBoolean() ? _TestUtil.randomSimpleString(random, wordLength) : _TestUtil.randomHtmlishString(random, wordLength)); + sb.append(random.nextBoolean() ? TestUtil.randomSimpleString(random, wordLength) : TestUtil.randomHtmlishString(random, wordLength)); } else { if (evilness < 10) { - sb.append(_TestUtil.randomSimpleString(random, wordLength)); + sb.append(TestUtil.randomSimpleString(random, wordLength)); } else if (evilness < 15) { assert sb.length() == 0; // we should always get wordLength back! - sb.append(_TestUtil.randomRealisticUnicodeString(random, wordLength, wordLength)); + sb.append(TestUtil.randomRealisticUnicodeString(random, wordLength, wordLength)); } else if (evilness == 16) { - sb.append(_TestUtil.randomHtmlishString(random, wordLength)); + sb.append(TestUtil.randomHtmlishString(random, wordLength)); } else if (evilness == 17) { // gives a lot of punctuation - sb.append(_TestUtil.randomRegexpishString(random, wordLength)); + sb.append(TestUtil.randomRegexpishString(random, wordLength)); } else { - sb.append(_TestUtil.randomUnicodeString(random, wordLength)); + sb.append(TestUtil.randomUnicodeString(random, wordLength)); } } } @@ -1132,7 +1132,7 @@ public class _TestUtil { if (random.nextInt(17) == 0) { // mix up case - String mixedUp = _TestUtil.randomlyRecaseCodePoints(random, sb.toString()); + String mixedUp = TestUtil.randomlyRecaseCodePoints(random, sb.toString()); assert mixedUp.length() == sb.length(); return mixedUp; } else { diff --git 
a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java index e770a69ee80..67f5eaae829 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java @@ -28,8 +28,8 @@ import java.util.Random; import java.util.Set; import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.UnicodeUtil; -import org.apache.lucene.util._TestUtil; /** * Utilities for testing automata. @@ -65,14 +65,14 @@ public class AutomatonTestUtil { if (0 == t && i < end - 1) { // Make a surrogate pair // High surrogate - buffer[i++] = (char) _TestUtil.nextInt(r, 0xd800, 0xdbff); + buffer[i++] = (char) TestUtil.nextInt(r, 0xd800, 0xdbff); // Low surrogate - buffer[i] = (char) _TestUtil.nextInt(r, 0xdc00, 0xdfff); + buffer[i] = (char) TestUtil.nextInt(r, 0xdc00, 0xdfff); } else if (t <= 1) buffer[i] = (char) r.nextInt(0x80); - else if (2 == t) buffer[i] = (char) _TestUtil.nextInt(r, 0x80, 0x800); - else if (3 == t) buffer[i] = (char) _TestUtil.nextInt(r, 0x800, 0xd7ff); - else if (4 == t) buffer[i] = (char) _TestUtil.nextInt(r, 0xe000, 0xffff); + else if (2 == t) buffer[i] = (char) TestUtil.nextInt(r, 0x80, 0x800); + else if (3 == t) buffer[i] = (char) TestUtil.nextInt(r, 0x800, 0xd7ff); + else if (4 == t) buffer[i] = (char) TestUtil.nextInt(r, 0xe000, 0xffff); else if (5 == t) buffer[i] = '.'; else if (6 == t) buffer[i] = '?'; else if (7 == t) buffer[i] = '*'; diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java b/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java index b68e97ffb2a..bf5c8acc50e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java +++ 
b/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java @@ -38,8 +38,8 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.UnicodeUtil; -import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.packed.PackedInts; import static org.junit.Assert.assertEquals; @@ -97,7 +97,7 @@ public class FSTTester { static String getRandomString(Random random) { final String term; if (random.nextBoolean()) { - term = _TestUtil.randomRealisticUnicodeString(random); + term = TestUtil.randomRealisticUnicodeString(random); } else { // we want to mix in limited-alphabet symbols so // we get more sharing of the nodes given how few @@ -115,7 +115,7 @@ public class FSTTester { } final char[] buffer = new char[end]; for (int i = 0; i < end; i++) { - buffer[i] = (char) _TestUtil.nextInt(r, 97, 102); + buffer[i] = (char) TestUtil.nextInt(r, 97, 102); } return new String(buffer, 0, end); } @@ -188,10 +188,10 @@ public class FSTTester { if (testPruning) { // simple pruning - doTest(_TestUtil.nextInt(random, 1, 1+pairs.size()), 0, true); + doTest(TestUtil.nextInt(random, 1, 1 + pairs.size()), 0, true); // leafy pruning - doTest(0, _TestUtil.nextInt(random, 1, 1+pairs.size()), true); + doTest(0, TestUtil.nextInt(random, 1, 1 + pairs.size()), true); } } @@ -285,7 +285,7 @@ public class FSTTester { prune1, prune2, prune1==0 && prune2==0, allowRandomSuffixSharing ? random.nextBoolean() : true, - allowRandomSuffixSharing ? _TestUtil.nextInt(random, 1, 10) : Integer.MAX_VALUE, + allowRandomSuffixSharing ? 
TestUtil.nextInt(random, 1, 10) : Integer.MAX_VALUE, outputs, null, willRewrite, @@ -434,7 +434,7 @@ public class FSTTester { final int num = LuceneTestCase.atLeast(random, 100); for(int iter=0;iter allFieldNames = getAllSortFieldNames(); - final int initialDocs = _TestUtil.nextInt(random(),100,200); + final int initialDocs = TestUtil.nextInt(random(), 100, 200); final int totalDocs = atLeast(5000); // start with a smallish number of documents, and test that we can do a full walk using a @@ -568,7 +568,7 @@ public class CursorPagingTest extends SolrTestCaseJ4 { for (String f : allFieldNames) { for (String order : new String[] {" asc", " desc"}) { String sort = f + order + ("id".equals(f) ? "" : ", id" + order); - String rows = "" + _TestUtil.nextInt(random(),13,50); + String rows = "" + TestUtil.nextInt(random(), 13, 50); SentinelIntSet ids = assertFullWalkNoDups(totalDocs, params("q", "*:*", "fl","id", @@ -588,7 +588,7 @@ public class CursorPagingTest extends SolrTestCaseJ4 { final int numRandomSorts = atLeast(5); for (int i = 0; i < numRandomSorts; i++) { final String sort = buildRandomSort(allFieldNames); - final String rows = "" + _TestUtil.nextInt(random(),63,113); + final String rows = "" + TestUtil.nextInt(random(), 63, 113); final String fl = random().nextBoolean() ? "id" : "id,score"; final boolean matchAll = random().nextBoolean(); final String q = matchAll ? "*:*" : buildRandomQuery(); @@ -609,12 +609,12 @@ public class CursorPagingTest extends SolrTestCaseJ4 { * of test multiplier and nightly status */ private static boolean useField() { - return 0 != _TestUtil.nextInt(random(), 0, 30); + return 0 != TestUtil.nextInt(random(), 0, 30); } /** returns likely most (1/10) of the time, otherwise unlikely */ private static Object skewed(Object likely, Object unlikely) { - return (0 == _TestUtil.nextInt(random(), 0, 9)) ? unlikely : likely; + return (0 == TestUtil.nextInt(random(), 0, 9)) ? 
unlikely : likely; } /** @@ -710,7 +710,7 @@ public class CursorPagingTest extends SolrTestCaseJ4 { * test faceting with deep paging */ public void testFacetingWithRandomSorts() throws Exception { - final int numDocs = _TestUtil.nextInt(random(), 1000, 3000); + final int numDocs = TestUtil.nextInt(random(), 1000, 3000); String[] fieldsToFacetOn = { "int", "long", "str" }; String[] facetMethods = { "enum", "fc", "fcs" }; @@ -723,14 +723,14 @@ public class CursorPagingTest extends SolrTestCaseJ4 { Collection allFieldNames = getAllSortFieldNames(); String[] fieldNames = new String[allFieldNames.size()]; allFieldNames.toArray(fieldNames); - String f = fieldNames[_TestUtil.nextInt(random(), 0, fieldNames.length - 1)]; - String order = 0 == _TestUtil.nextInt(random(), 0, 1) ? " asc" : " desc"; + String f = fieldNames[TestUtil.nextInt(random(), 0, fieldNames.length - 1)]; + String order = 0 == TestUtil.nextInt(random(), 0, 1) ? " asc" : " desc"; String sort = f + order + (f.equals("id") ? "" : ", id" + order); - String rows = "" + _TestUtil.nextInt(random(),13,50); + String rows = "" + TestUtil.nextInt(random(), 13, 50); String facetField = fieldsToFacetOn - [_TestUtil.nextInt(random(), 0, fieldsToFacetOn.length - 1)]; + [TestUtil.nextInt(random(), 0, fieldsToFacetOn.length - 1)]; String facetMethod = facetMethods - [_TestUtil.nextInt(random(), 0, facetMethods.length - 1)]; + [TestUtil.nextInt(random(), 0, facetMethods.length - 1)]; SentinelIntSet ids = assertFullWalkNoDupsWithFacets (numDocs, params("q", "*:*", "fl", "id," + facetField, @@ -876,11 +876,11 @@ public class CursorPagingTest extends SolrTestCaseJ4 { // (hopefully with lots of duplication) if (useField()) { doc.addField("int", skewed(random().nextInt(), - _TestUtil.nextInt(random(), 20, 50))); + TestUtil.nextInt(random(), 20, 50))); } if (useField()) { doc.addField("long", skewed(random().nextLong(), - _TestUtil.nextInt(random(), 5000, 5100))); + TestUtil.nextInt(random(), 5000, 5100))); } if (useField()) { 
doc.addField("float", skewed(random().nextFloat() * random().nextInt(), @@ -892,11 +892,11 @@ public class CursorPagingTest extends SolrTestCaseJ4 { } if (useField()) { doc.addField("str", skewed(randomUsableUnicodeString(), - _TestUtil.randomSimpleString(random(),1,1))); + TestUtil.randomSimpleString(random(), 1, 1))); } if (useField()) { - int numBytes = (int) skewed(_TestUtil.nextInt(random(), 20, 50), 2); + int numBytes = (int) skewed(TestUtil.nextInt(random(), 20, 50), 2); byte[] randBytes = new byte[numBytes]; random().nextBytes(randBytes); doc.addField("bin", ByteBuffer.wrap(randBytes)); @@ -917,8 +917,8 @@ public class CursorPagingTest extends SolrTestCaseJ4 { return "{!func}" + numericFields.get(0); } else { // several SHOULD clauses on range queries - int low = _TestUtil.nextInt(random(),-2379,2); - int high = _TestUtil.nextInt(random(),4,5713); + int low = TestUtil.nextInt(random(), -2379, 2); + int high = TestUtil.nextInt(random(), 4, 5713); return numericFields.get(0) + ":[* TO 0] " + numericFields.get(1) + ":[0 TO *] " + @@ -931,10 +931,10 @@ public class CursorPagingTest extends SolrTestCaseJ4 { * updates use XML we need to ensure we don't get "special" code block. */ private static String randomUsableUnicodeString() { - String result = _TestUtil.randomRealisticUnicodeString(random()); + String result = TestUtil.randomRealisticUnicodeString(random()); if (result.matches(".*\\p{InSpecials}.*")) { // oh well - result = _TestUtil.randomSimpleString(random()); + result = TestUtil.randomSimpleString(random()); } return result; } @@ -958,7 +958,7 @@ public class CursorPagingTest extends SolrTestCaseJ4 { // wrap in a function sometimes if ( (!"score".equals(field)) && - (0 == _TestUtil.nextInt(random(), 0, 7)) ) { + (0 == TestUtil.nextInt(random(), 0, 7)) ) { // specific function doesn't matter, just proving that we can handle the concept. 
// but we do have to be careful with non numeric fields if (field.startsWith("str") || field.startsWith("bin")) { diff --git a/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java b/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java index 244375730b5..7529dd9912e 100644 --- a/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java +++ b/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java @@ -17,7 +17,7 @@ package org.apache.solr; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.common.SolrInputDocument; @@ -85,20 +85,20 @@ public class TestHighlightDedupGrouping extends BaseDistributedSearchTestCase { handle.put("timestamp", SKIPVAL); handle.put("grouped", UNORDERED); // distrib grouping doesn't guarantee order of top level group commands - int numDocs = _TestUtil.nextInt(random(), 100, 1000); - int numGroups = _TestUtil.nextInt(random(), 1, numDocs / 50); + int numDocs = TestUtil.nextInt(random(), 100, 1000); + int numGroups = TestUtil.nextInt(random(), 1, numDocs / 50); int[] docsInGroup = new int[numGroups + 1]; - int percentDuplicates = _TestUtil.nextInt(random(), 1, 25); + int percentDuplicates = TestUtil.nextInt(random(), 1, 25); for (int docid = 0 ; docid < numDocs ; ++docid) { - int group = _TestUtil.nextInt(random(), 1, numGroups); + int group = TestUtil.nextInt(random(), 1, numGroups); ++docsInGroup[group]; - boolean makeDuplicate = 0 == _TestUtil.nextInt(random(), 0, numDocs / percentDuplicates); + boolean makeDuplicate = 0 == TestUtil.nextInt(random(), 0, numDocs / percentDuplicates); if (makeDuplicate) { for (int shard = 0 ; shard < shardCount ; ++shard) { addDoc(docid, group, shard); } } else { - int shard =
TestUtil.nextInt(random(), 0, shardCount - 1); addDoc(docid, group, shard); } } diff --git a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java index 13a927f42f5..9f1274456c0 100644 --- a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java +++ b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java @@ -24,7 +24,7 @@ import java.util.Map; import java.util.Random; import org.apache.lucene.search.FieldCache; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.solr.common.params.ModifiableSolrParams; @@ -202,7 +202,7 @@ public class TestRandomDVFaceting extends SolrTestCaseJ4 { if ((ftype.vals instanceof SVal) && rand.nextInt(100) < 20) { // validate = false; String prefix = ftype.createValue().toString(); - if (rand.nextInt(100) < 5) prefix = _TestUtil.randomUnicodeString(rand); + if (rand.nextInt(100) < 5) prefix = TestUtil.randomUnicodeString(rand); else if (rand.nextInt(100) < 10) prefix = Character.toString((char)rand.nextInt(256)); else if (prefix.length() > 0) prefix = prefix.substring(0, rand.nextInt(prefix.length())); params.add("facet.prefix", prefix); diff --git a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java index 04307a77ea5..6ee53a9e5ae 100644 --- a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java +++ b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java @@ -18,7 +18,7 @@ package org.apache.solr; import org.apache.lucene.search.FieldCache; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.request.SolrQueryRequest; @@ -185,7 +185,7 @@ public class 
TestRandomFaceting extends SolrTestCaseJ4 { if ((ftype.vals instanceof SVal) && rand.nextInt(100) < 20) { // validate = false; String prefix = ftype.createValue().toString(); - if (rand.nextInt(100) < 5) prefix = _TestUtil.randomUnicodeString(rand); + if (rand.nextInt(100) < 5) prefix = TestUtil.randomUnicodeString(rand); else if (rand.nextInt(100) < 10) prefix = Character.toString((char)rand.nextInt(256)); else if (prefix.length() > 0) prefix = prefix.substring(0, rand.nextInt(prefix.length())); params.add("facet.prefix", prefix); diff --git a/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java b/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java index 218dc0e8618..7579acc4cb6 100644 --- a/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java +++ b/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java @@ -30,7 +30,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.junit.Ignore; public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { @@ -277,7 +277,7 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testRandomBrokenHTML() throws Exception { int maxNumElements = 10000; - String text = _TestUtil.randomHtmlishString(random(), maxNumElements); + String text = TestUtil.randomHtmlishString(random(), maxNumElements); Reader reader = new LegacyHTMLStripCharFilter(new StringReader(text)); while (reader.read() != -1); @@ -289,18 +289,18 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { int maxNumWords = 10000; int minWordLength = 3; int maxWordLength = 20; - int numWords = _TestUtil.nextInt(random(), minNumWords, maxNumWords); - switch 
(_TestUtil.nextInt(random(), 0, 4)) { + int numWords = TestUtil.nextInt(random(), minNumWords, maxNumWords); + switch (TestUtil.nextInt(random(), 0, 4)) { case 0: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomUnicodeString(random(), maxWordLength)); + text.append(TestUtil.randomUnicodeString(random(), maxWordLength)); text.append(' '); } break; } case 1: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomRealisticUnicodeString + text.append(TestUtil.randomRealisticUnicodeString (random(), minWordLength, maxWordLength)); text.append(' '); } @@ -308,7 +308,7 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { } default: { // ASCII 50% of the time for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomSimpleString(random())); + text.append(TestUtil.randomSimpleString(random())); text.append(' '); } } diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java index 32b80aadefa..0e7c1ffd4d6 100644 --- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java @@ -46,7 +46,7 @@ import javax.management.MBeanServerFactory; import javax.management.ObjectName; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.SolrRequest; @@ -634,8 +634,8 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa int cnt = random().nextInt(TEST_NIGHTLY ? 
6 : 3) + 1; for (int i = 0; i < cnt; i++) { - int numShards = _TestUtil.nextInt(random(), 0, shardCount) + 1; - int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 1; + int numShards = TestUtil.nextInt(random(), 0, shardCount) + 1; + int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1; int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer() .getZkStateReader().getClusterState().getLiveNodes().size())) + 1; @@ -919,8 +919,8 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa for (int i = 0; i < cnt; i++) { String collectionName = "awholynewstresscollection_" + name + "_" + i; - int numShards = _TestUtil.nextInt(random(), 0, shardCount * 2) + 1; - int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 1; + int numShards = TestUtil.nextInt(random(), 0, shardCount * 2) + 1; + int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1; int maxShardsPerNode = (((numShards * 2 * replicationFactor) / getCommonCloudSolrServer() .getZkStateReader().getClusterState().getLiveNodes().size())) + 1; diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java index efdc95792ef..1a2ea9331ef 100644 --- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java @@ -38,9 +38,8 @@ import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.SolrRequest; import org.apache.solr.client.solrj.SolrServerException; @@ -57,13 +56,10 @@ import org.apache.solr.common.cloud.ZkNodeProps; import 
org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.CollectionParams.CollectionAction; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.common.params.ShardParams; -import org.apache.solr.common.util.NamedList; import org.apache.solr.update.DirectUpdateHandler2; import org.apache.solr.util.DefaultSolrThreadFactory; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Ignore; /** * Tests the Custom Sharding API. @@ -147,7 +143,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase { // create new collections rapid fire Map> collectionInfos = new HashMap>(); - int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 2; + int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2; int cnt = random().nextInt(6) + 1; @@ -297,7 +293,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase { int numShards = 4; - replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 2; + replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2; int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer() .getZkStateReader().getClusterState().getLiveNodes().size())) + 1; diff --git a/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java b/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java index b5e61ec425a..e0b310a141b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java @@ -17,7 +17,7 @@ package org.apache.solr.cloud; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.SentinelIntSet; import org.apache.solr.CursorPagingTest; import org.apache.solr.client.solrj.SolrServer; @@ -29,7 +30,6 @@ import org.apache.solr.common.SolrDocument; import
org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; -import org.apache.solr.common.SolrInputField; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.params.CommonParams; @@ -46,7 +46,6 @@ import java.io.IOException; import java.util.List; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.Map; /** @@ -518,7 +517,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase { public void doRandomSortsOnLargeIndex() throws Exception { final Collection allFieldNames = getAllSortFieldNames(); - final int numInitialDocs = _TestUtil.nextInt(random(),100,200); + final int numInitialDocs = TestUtil.nextInt(random(), 100, 200); final int totalDocs = atLeast(5000); // start with a smallish number of documents, and test that we can do a full walk using a @@ -535,7 +534,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase { for (String f : allFieldNames) { for (String order : new String[] {" asc", " desc"}) { String sort = f + order + ("id".equals(f) ? "" : ", id" + order); - String rows = "" + _TestUtil.nextInt(random(),13,50); + String rows = "" + TestUtil.nextInt(random(), 13, 50); SentinelIntSet ids = assertFullWalkNoDups(numInitialDocs, params("q", "*:*", "fl","id,"+f, @@ -579,7 +578,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase { final int numRandomSorts = atLeast(5); for (int i = 0; i < numRandomSorts; i++) { final String sort = CursorPagingTest.buildRandomSort(allFieldNames); - final String rows = "" + _TestUtil.nextInt(random(),63,113); + final String rows = "" + TestUtil.nextInt(random(), 63, 113); final String fl = random().nextBoolean() ? "id" : "id,score"; final boolean matchAll = random().nextBoolean(); final String q = matchAll ? 
"*:*" : CursorPagingTest.buildRandomQuery(); diff --git a/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java b/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java index 1ffe0deb0d0..c97d500e188 100644 --- a/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java +++ b/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.search.QueryResultKey; import org.junit.Test; @@ -165,10 +165,10 @@ public class QueryResultKeyTest extends SolrTestCaseJ4 { * the array is garunteed to always have at least 1 element */ private int[] smallArrayOfRandomNumbers() { - int size = _TestUtil.nextInt(random(), 1, 5); + int size = TestUtil.nextInt(random(), 1, 5); int[] result = new int[size]; for (int i=0; i < size; i++) { - result[i] = _TestUtil.nextInt(random(), 1, 5); + result[i] = TestUtil.nextInt(random(), 1, 5); } return result; } diff --git a/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java b/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java index 48ac4715f5d..c9ea6d8bad1 100644 --- a/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java +++ b/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java @@ -22,7 +22,7 @@ import junit.framework.Assert; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizerFactory; import org.apache.lucene.analysis.ngram.NGramFilterFactory; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; import org.apache.solr.handler.admin.LukeRequestHandler; @@ -60,7 +60,7 @@ public class 
ResourceLoaderTest extends SolrTestCaseJ4 } public void testEscapeInstanceDir() throws Exception { - File temp = _TestUtil.getTempDir("testEscapeInstanceDir"); + File temp = TestUtil.getTempDir("testEscapeInstanceDir"); try { temp.mkdirs(); new File(temp, "dummy.txt").createNewFile(); @@ -76,7 +76,7 @@ public class ResourceLoaderTest extends SolrTestCaseJ4 } loader.close(); } finally { - _TestUtil.rmDir(temp); + TestUtil.rmDir(temp); } } @@ -170,7 +170,7 @@ public class ResourceLoaderTest extends SolrTestCaseJ4 } public void testClassLoaderLibs() throws Exception { - File tmpRoot = _TestUtil.getTempDir("testClassLoaderLibs"); + File tmpRoot = TestUtil.getTempDir("testClassLoaderLibs"); File lib = new File(tmpRoot, "lib"); lib.mkdirs(); diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java index b66b8f938cb..25d1e6a78b6 100644 --- a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java +++ b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java @@ -19,7 +19,7 @@ package org.apache.solr.core; import org.apache.commons.io.FileUtils; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.handler.admin.CollectionsHandler; import org.apache.solr.handler.admin.CoreAdminHandler; @@ -240,7 +240,7 @@ public class TestCoreContainer extends SolrTestCaseJ4 { @Test public void testSharedLib() throws Exception { - File tmpRoot = _TestUtil.getTempDir("testSharedLib"); + File tmpRoot = TestUtil.getTempDir("testSharedLib"); File lib = new File(tmpRoot, "lib"); lib.mkdirs(); diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java b/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java index 181bd4753f7..0443f2d0ea4 100644 --- a/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java +++ 
b/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java @@ -19,7 +19,7 @@ package org.apache.solr.core; import org.apache.commons.io.FileUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.core.SolrXMLSerializer.SolrCoreXMLDef; import org.apache.solr.core.SolrXMLSerializer.SolrXMLDef; import org.junit.Test; @@ -81,7 +81,7 @@ public class TestSolrXMLSerializer extends LuceneTestCase { assertResults(((StringWriter) w).getBuffer().toString().getBytes("UTF-8")); // again with default file - File tmpFile = _TestUtil.createTempFile("solr.xml", null, TEMP_DIR); + File tmpFile = TestUtil.createTempFile("solr.xml", null, TEMP_DIR); serializer.persistFile(tmpFile, solrXMLDef); diff --git a/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java b/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java index 1f31d418f54..a51187ce6e8 100644 --- a/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java +++ b/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java @@ -17,7 +17,7 @@ package org.apache.solr.handler; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.common.params.CommonParams; @@ -50,7 +50,7 @@ public class TestCSVLoader extends SolrTestCaseJ4 { // if you override setUp or tearDown, you better call // the super classes version super.setUp(); - File tempDir = _TestUtil.getTempDir("TestCSVLoader"); + File tempDir = TestUtil.getTempDir("TestCSVLoader"); file = new File(tempDir, "solr_tmp.csv"); filename = file.getPath(); cleanup(); diff --git a/solr/core/src/test/org/apache/solr/schema/TestCollationField.java b/solr/core/src/test/org/apache/solr/schema/TestCollationField.java index 08ea28abec6..71a5733bb60 100644 --- 
a/solr/core/src/test/org/apache/solr/schema/TestCollationField.java +++ b/solr/core/src/test/org/apache/solr/schema/TestCollationField.java @@ -23,7 +23,7 @@ import java.text.Collator; import java.text.RuleBasedCollator; import java.util.Locale; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; @@ -63,7 +63,7 @@ public class TestCollationField extends SolrTestCaseJ4 { */ public static String setupSolrHome() throws Exception { // make a solr home underneath the test's TEMP_DIR - File tmpFile = _TestUtil.getTempDir("collation1"); + File tmpFile = TestUtil.getTempDir("collation1"); tmpFile.delete(); tmpFile.mkdir(); diff --git a/solr/core/src/test/org/apache/solr/schema/TestCollationFieldDocValues.java b/solr/core/src/test/org/apache/solr/schema/TestCollationFieldDocValues.java index 0417fe4e3a9..a3f489ed28e 100644 --- a/solr/core/src/test/org/apache/solr/schema/TestCollationFieldDocValues.java +++ b/solr/core/src/test/org/apache/solr/schema/TestCollationFieldDocValues.java @@ -23,7 +23,7 @@ import java.text.Collator; import java.text.RuleBasedCollator; import java.util.Locale; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.commons.io.FileUtils; @@ -65,7 +65,7 @@ public class TestCollationFieldDocValues extends SolrTestCaseJ4 { */ public static String setupSolrHome() throws Exception { // make a solr home underneath the test's TEMP_DIR - File tmpFile = _TestUtil.getTempDir("collation1"); + File tmpFile = TestUtil.getTempDir("collation1"); tmpFile.delete(); tmpFile.mkdir(); diff --git a/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java b/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java index 89dd7518830..5a822984925 100644 --- a/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java +++ 
b/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java @@ -17,7 +17,7 @@ package org.apache.solr.search; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; @@ -205,7 +205,7 @@ public class CursorMarkTest extends SolrTestCaseJ4 { if (null == sf) { // score or function results[i] = (Float) random().nextFloat() * random().nextInt(); break; - } else if (0 == _TestUtil.nextInt(random(), 0, 7)) { + } else if (0 == TestUtil.nextInt(random(), 0, 7)) { // emulate missing value for doc results[i] = null; } else { @@ -218,11 +218,11 @@ public class CursorMarkTest extends SolrTestCaseJ4 { Object val = null; if (fieldName.equals("id")) { - val = sf.getType().unmarshalSortValue(_TestUtil.randomSimpleString(random())); + val = sf.getType().unmarshalSortValue(TestUtil.randomSimpleString(random())); } else if (fieldName.startsWith("str")) { - val = sf.getType().unmarshalSortValue(_TestUtil.randomRealisticUnicodeString(random())); + val = sf.getType().unmarshalSortValue(TestUtil.randomRealisticUnicodeString(random())); } else if (fieldName.startsWith("bin")) { - byte[] randBytes = new byte[_TestUtil.nextInt(random(), 1, 50)]; + byte[] randBytes = new byte[TestUtil.nextInt(random(), 1, 50)]; random().nextBytes(randBytes); val = new BytesRef(randBytes); } else if (fieldName.startsWith("int")) { diff --git a/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java b/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java index 87280b4d258..311816f107d 100644 --- a/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java +++ b/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java @@ -17,13 +17,10 @@ package org.apache.solr.search; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.params.CommonParams; -import 
org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.transform.*; -import org.apache.lucene.util._TestUtil; - import org.junit.BeforeClass; import org.junit.Test; @@ -333,14 +330,14 @@ public class ReturnFieldsTest extends SolrTestCaseJ4 { final boolean aliasId = r.nextBoolean(); final boolean aliasFoo = r.nextBoolean(); - final String id = _TestUtil.randomWhitespace(r, 0, 3) + + final String id = TestUtil.randomWhitespace(r, 0, 3) + (aliasId ? "aliasId:" : "") + "id" + - _TestUtil.randomWhitespace(r, 1, 3); - final String foo_i = _TestUtil.randomWhitespace(r, 0, 3) + + TestUtil.randomWhitespace(r, 1, 3); + final String foo_i = TestUtil.randomWhitespace(r, 0, 3) + (aliasFoo ? "aliasFoo:" : "") + "foo_i" + - _TestUtil.randomWhitespace(r, 0, 3); + TestUtil.randomWhitespace(r, 0, 3); final String fl = id + (r.nextBoolean() ? "" : ",") + foo_i; ReturnFields rf = new SolrReturnFields(req("fl", fl)); diff --git a/solr/core/src/test/org/apache/solr/search/TestSort.java b/solr/core/src/test/org/apache/solr/search/TestSort.java index 9c6093583e0..520ee7329dd 100644 --- a/solr/core/src/test/org/apache/solr/search/TestSort.java +++ b/solr/core/src/test/org/apache/solr/search/TestSort.java @@ -49,7 +49,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.schema.SchemaField; @@ -96,12 +96,12 @@ public class TestSort extends SolrTestCaseJ4 { for (int i = 0; i < iters; i++) { final StringBuilder input = new StringBuilder(); - final String[] names = new String[_TestUtil.nextInt(r,1,10)]; + final String[] names = new String[TestUtil.nextInt(r, 1, 10)]; final boolean[] reverse = new boolean[names.length]; for (int j = 0; j < names.length; j++) { names[j] = 
null; for (int k = 0; k < nonBlankAttempts && null == names[j]; k++) { - names[j] = _TestUtil.randomRealisticUnicodeString(r, 1, 100); + names[j] = TestUtil.randomRealisticUnicodeString(r, 1, 100); // munge anything that might make this a function names[j] = names[j].replaceFirst("\\{","\\{\\{"); diff --git a/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTest.java b/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTest.java index ab62d6575b8..d4f9e1bb7ac 100644 --- a/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTest.java +++ b/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTest.java @@ -27,7 +27,7 @@ import org.apache.http.Header; import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.impl.cookie.DateUtils; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.common.params.CommonParams; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -253,7 +253,7 @@ public class CacheHeaderTest extends CacheHeaderTestBase { protected File makeFile(String contents, String charset) { try { - File f = _TestUtil.createTempFile("cachetest_csv", null, TEMP_DIR); + File f = TestUtil.createTempFile("cachetest_csv", null, TEMP_DIR); Writer out = new OutputStreamWriter(new FileOutputStream(f), charset); out.write(contents); out.close(); diff --git a/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java b/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java index 07526483bbf..95bda284b22 100644 --- a/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java +++ b/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.Set; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import 
org.apache.solr.common.params.SolrParams; import org.apache.solr.common.params.CommonParams; @@ -38,7 +38,6 @@ import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.response.SolrQueryResponse; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; @Slow @@ -502,7 +501,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 { // produce an estimate no more then the total number of docs final int iters = atLeast(10); for (int iter = 0; iter < iters; iter++) { - final int val = _TestUtil.nextInt(random(), 1, 17); + final int val = TestUtil.nextInt(random(), 1, 17); assertQ(req(reusedParams, CommonParams.Q, "teststop:metnoia", SpellingParams.SPELLCHECK_COLLATE_MAX_COLLECT_DOCS, ""+val) diff --git a/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java b/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java index 92aa4f2d9f3..21ceabaa2df 100755 --- a/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java +++ b/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java @@ -17,7 +17,7 @@ package org.apache.solr.update; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; import org.apache.solr.util.DefaultSolrThreadFactory; @@ -363,17 +363,17 @@ public class TestDocBasedVersionConstraints extends SolrTestCaseJ4 { // runner = Executors.newFixedThreadPool(1); // to test single threaded try { for (int id = 0; id < NUM_DOCS; id++) { - final int numAdds = _TestUtil.nextInt(random(),3,MAX_CONCURENT); - final int winner = _TestUtil.nextInt(random(),0,numAdds-1); + final int numAdds = TestUtil.nextInt(random(), 3, MAX_CONCURENT); + final int winner = TestUtil.nextInt(random(), 0, numAdds - 1); final int winnerVersion = atLeast(100); - final boolean winnerIsDeleted = (0 == 
_TestUtil.nextInt(random(),0,4)); + final boolean winnerIsDeleted = (0 == TestUtil.nextInt(random(), 0, 4)); List> tasks = new ArrayList>(numAdds); for (int variant = 0; variant < numAdds; variant++) { final boolean iShouldWin = (variant==winner); final long version = (iShouldWin ? winnerVersion - : _TestUtil.nextInt(random(),1,winnerVersion-1)); + : TestUtil.nextInt(random(), 1, winnerVersion - 1)); if ((iShouldWin && winnerIsDeleted) - || (!iShouldWin && 0 == _TestUtil.nextInt(random(),0,4))) { + || (!iShouldWin && 0 == TestUtil.nextInt(random(), 0, 4))) { tasks.add(delayedDelete(""+id, ""+version)); } else { tasks.add(delayedAdd("id",""+id,"name","name"+id+"_"+variant, diff --git a/solr/core/src/test/org/apache/solr/util/TestFastOutputStream.java b/solr/core/src/test/org/apache/solr/util/TestFastOutputStream.java index 7c17d450f16..eb8ec60b2d7 100644 --- a/solr/core/src/test/org/apache/solr/util/TestFastOutputStream.java +++ b/solr/core/src/test/org/apache/solr/util/TestFastOutputStream.java @@ -18,14 +18,9 @@ package org.apache.solr.util; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; import org.apache.solr.update.MemOutputStream; -import java.util.HashSet; -import java.util.Locale; import java.util.Random; -import java.util.Set; -import java.util.TimeZone; public class TestFastOutputStream extends LuceneTestCase { diff --git a/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java b/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java index cf6b1c59861..e4f695f5ed8 100644 --- a/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java +++ b/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java @@ -18,7 +18,7 @@ package org.apache.solr.util; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import java.util.Set; import java.util.HashSet; @@ -117,8 +117,8 @@ public class TimeZoneUtilsTest extends LuceneTestCase { 
final Random r = random(); final int iters = atLeast(r, 50); for (int i = 0; i <= iters; i++) { - int hour = _TestUtil.nextInt(r, 0, 23); - int min = _TestUtil.nextInt(r, 0, 59); + int hour = TestUtil.nextInt(r, 0, 23); + int min = TestUtil.nextInt(r, 0, 59); String hours = String.format(Locale.ROOT, (r.nextBoolean() ? ONE_DIGIT : TWO_DIGIT), diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java index 3a1cc4bc58d..86341d023c9 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java @@ -18,7 +18,6 @@ package org.apache.solr.client.solrj; -import java.io.IOException; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; @@ -27,15 +26,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.collect.Lists; import com.google.common.collect.Maps; import junit.framework.Assert; -import org.apache.lucene.util._TestUtil; -import org.apache.solr.SolrJettyTestBase; +import org.apache.lucene.util.TestUtil; import org.apache.solr.client.solrj.impl.BinaryResponseParser; import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer; import org.apache.solr.client.solrj.impl.HttpSolrServer; @@ -52,7 +48,6 @@ import org.apache.solr.client.solrj.response.PivotField; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.client.solrj.response.FacetField; import org.apache.solr.client.solrj.response.UpdateResponse; -import org.apache.solr.client.solrj.util.ClientUtils; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrException; @@ -358,7 +353,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase } } - int 
numDocs = _TestUtil.nextInt(random(), 1, 10*RANDOM_MULTIPLIER); + int numDocs = TestUtil.nextInt(random(), 1, 10 * RANDOM_MULTIPLIER); // Empty the database... server.deleteByQuery("*:*");// delete everything! diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java index c404e06f83b..1f7eebf3cf8 100644 --- a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java +++ b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java @@ -21,14 +21,14 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; public class TestJavaBinCodec extends LuceneTestCase { public void testStrings() throws Exception { JavaBinCodec javabin = new JavaBinCodec(); for (int i = 0; i < 10000*RANDOM_MULTIPLIER; i++) { - String s = _TestUtil.randomUnicodeString(random()); + String s = TestUtil.randomUnicodeString(random()); ByteArrayOutputStream os = new ByteArrayOutputStream(); javabin.marshal(s, os); ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray()); diff --git a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java index 3ccc5fb65f2..5b96642a90c 100644 --- a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java +++ b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java @@ -37,7 +37,7 @@ import junit.framework.Assert; import org.apache.commons.io.FileUtils; import org.apache.lucene.search.FieldCache; import org.apache.lucene.util.Constants; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.client.solrj.SolrResponse; import org.apache.solr.client.solrj.SolrServer; import 
org.apache.solr.client.solrj.SolrServerException; @@ -103,15 +103,15 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { // half the time we use the root context, the other half... // Remember: randomSimpleString might be the empty string - hostContext.append(_TestUtil.randomSimpleString(random(), 2)); + hostContext.append(TestUtil.randomSimpleString(random(), 2)); if (random().nextBoolean()) { hostContext.append("_"); } - hostContext.append(_TestUtil.randomSimpleString(random(), 3)); + hostContext.append(TestUtil.randomSimpleString(random(), 3)); if ( ! "/".equals(hostContext.toString())) { // if our random string is empty, this might add a trailing slash, // but our code should be ok with that - hostContext.append("/").append(_TestUtil.randomSimpleString(random(), 2)); + hostContext.append("/").append(TestUtil.randomSimpleString(random(), 2)); } else { // we got 'lucky' and still just have the root context, // NOOP: don't try to add a subdir to nothing (ie "//" is bad) diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java index 67641a4018b..9023c58e753 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java @@ -49,7 +49,7 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.QuickPatchThreadsFilter; -import org.apache.lucene.util._TestUtil; +import org.apache.lucene.util.TestUtil; import org.apache.solr.client.solrj.impl.HttpClientConfigurer; import org.apache.solr.client.solrj.impl.HttpClientUtil; import org.apache.solr.client.solrj.util.ClientUtils; @@ -278,8 +278,8 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { // don't ask iwc.getMaxThreadStates(), sometimes newIWC uses // RandomDocumentsWriterPerThreadPool and all hell breaks 
loose int maxIndexingThreads = rarely(random()) - ? _TestUtil.nextInt(random(), 5, 20) // crazy value - : _TestUtil.nextInt(random(), 1, 4); // reasonable value + ? TestUtil.nextInt(random(), 5, 20) // crazy value + : TestUtil.nextInt(random(), 1, 4); // reasonable value System.setProperty("solr.tests.maxIndexingThreads", String.valueOf(maxIndexingThreads)); }