mirror of https://github.com/apache/lucene.git

commit 36b2f2512b (parent a904e73bf4)

LUCENE-5449: Rename _TestUtil to TestUtil.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1569597 13f79535-47bb-0310-9956-ffa450edef68
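The change is mechanical: every call site swaps the `_TestUtil` import and class prefix for `TestUtil`, leaving method names and arguments untouched. A minimal sketch of the pattern under that assumption (the class `RenameSketch` below is hypothetical and not part of this commit; `nextInt` and `randomSimpleString` are signatures visible in the hunks that follow, and the lucene-test-framework jar of this era is assumed on the classpath):

    // RenameSketch.java -- hypothetical illustration, not part of this commit.
    import java.util.Random;

    import org.apache.lucene.util.TestUtil; // before this commit: org.apache.lucene.util._TestUtil

    public class RenameSketch {
      public static void main(String[] args) {
        Random random = new Random(42);
        // before: int n = _TestUtil.nextInt(random, 1, 100);
        int n = TestUtil.nextInt(random, 1, 100);       // random int in [1, 100]
        // before: String s = _TestUtil.randomSimpleString(random);
        String s = TestUtil.randomSimpleString(random); // short random ASCII-ish string
        System.out.println(n + " " + s);
      }
    }

Because only the class name changes, most hunks below are one-line swaps. In a few files the import also moves (TestUtil sorts before UnicodeUtil and Version, where _TestUtil sorted after), and a couple of files appear to pick up a duplicated `import org.apache.lucene.util.TestUtil;`, which is legal Java and harmless.
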
@@ -31,7 +31,7 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
 
@@ -786,7 +786,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
 
   public void testRandomBrokenHTML() throws Exception {
     int maxNumElements = 10000;
-    String text = _TestUtil.randomHtmlishString(random(), maxNumElements);
+    String text = TestUtil.randomHtmlishString(random(), maxNumElements);
     checkAnalysisConsistency(random(), newTestAnalyzer(), random().nextBoolean(), text);
   }
 
@@ -796,18 +796,18 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
     int maxNumWords = 10000;
    int minWordLength = 3;
     int maxWordLength = 20;
-    int numWords = _TestUtil.nextInt(random(), minNumWords, maxNumWords);
-    switch (_TestUtil.nextInt(random(), 0, 4)) {
+    int numWords = TestUtil.nextInt(random(), minNumWords, maxNumWords);
+    switch (TestUtil.nextInt(random(), 0, 4)) {
       case 0: {
        for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) {
-          text.append(_TestUtil.randomUnicodeString(random(), maxWordLength));
+          text.append(TestUtil.randomUnicodeString(random(), maxWordLength));
          text.append(' ');
        }
        break;
       }
       case 1: {
        for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) {
-          text.append(_TestUtil.randomRealisticUnicodeString
+          text.append(TestUtil.randomRealisticUnicodeString
              (random(), minWordLength, maxWordLength));
          text.append(' ');
        }
@@ -815,7 +815,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
       }
       default: { // ASCII 50% of the time
        for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) {
-          text.append(_TestUtil.randomSimpleString(random()));
+          text.append(TestUtil.randomSimpleString(random()));
          text.append(' ');
        }
       }

@@ -33,8 +33,8 @@ import org.apache.lucene.analysis.CharFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util._TestUtil;
 
 public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
@@ -274,9 +274,9 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
     int num = random.nextInt(5);
     //System.out.println("NormalizeCharMap=");
     for (int i = 0; i < num; i++) {
-      String key = _TestUtil.randomSimpleString(random);
+      String key = TestUtil.randomSimpleString(random);
       if (!keys.contains(key) && key.length() != 0) {
-        String value = _TestUtil.randomSimpleString(random);
+        String value = TestUtil.randomSimpleString(random);
        builder.add(key, value);
        keys.add(key);
        //System.out.println("mapping: '" + key + "' => '" + value + "'");
@@ -294,7 +294,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
        System.out.println("\nTEST iter=" + iter);
       }
 
-      final char endLetter = (char) _TestUtil.nextInt(random, 'b', 'z');
+      final char endLetter = (char) TestUtil.nextInt(random, 'b', 'z');
 
       final Map<String,String> map = new HashMap<String,String>();
       final NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
@@ -303,9 +303,9 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
        System.out.println(" mappings:");
       }
       while (map.size() < numMappings) {
-        final String key = _TestUtil.randomSimpleStringRange(random, 'a', endLetter, 7);
+        final String key = TestUtil.randomSimpleStringRange(random, 'a', endLetter, 7);
        if (key.length() != 0 && !map.containsKey(key)) {
-          final String value = _TestUtil.randomSimpleString(random);
+          final String value = TestUtil.randomSimpleString(random);
          map.put(key, value);
          builder.add(key, value);
          if (VERBOSE) {
@@ -321,7 +321,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
       }
 
       for(int iter2=0;iter2<100;iter2++) {
-        final String content = _TestUtil.randomSimpleStringRange(random, 'a', endLetter, atLeast(1000));
+        final String content = TestUtil.randomSimpleStringRange(random, 'a', endLetter, atLeast(1000));
 
        if (VERBOSE) {
          System.out.println(" content=" + content);
@@ -427,7 +427,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
          }
          actualBuilder.append((char) ch);
        } else {
-          final char[] buffer = new char[_TestUtil.nextInt(random, 1, 100)];
+          final char[] buffer = new char[TestUtil.nextInt(random, 1, 100)];
          final int off = buffer.length == 1 ? 0 : random.nextInt(buffer.length-1);
          final int count = mapFilter.read(buffer, off, buffer.length-off);
          if (count == -1) {

@@ -30,7 +30,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.BasicOperations;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
@@ -76,7 +76,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
      }
    };
    for (int i = 0; i < 1000; i++) {
-      String s = _TestUtil.randomSimpleString(random);
+      String s = TestUtil.randomSimpleString(random);
      assertEquals(s, left.tokenStream("foo", newStringReader(s)),
                   right.tokenStream("foo", newStringReader(s)));
    }
@@ -97,7 +97,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
    };
    int numIterations = atLeast(50);
    for (int i = 0; i < numIterations; i++) {
-      String s = _TestUtil.randomSimpleString(random, maxLength);
+      String s = TestUtil.randomSimpleString(random, maxLength);
      assertEquals(s, left.tokenStream("foo", newStringReader(s)),
                   right.tokenStream("foo", newStringReader(s)));
    }
@@ -114,7 +114,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
      }
    };
    for (int i = 0; i < 1000; i++) {
-      String s = _TestUtil.randomHtmlishString(random, 20);
+      String s = TestUtil.randomHtmlishString(random, 20);
      assertEquals(s, left.tokenStream("foo", newStringReader(s)),
                   right.tokenStream("foo", newStringReader(s)));
    }
@@ -134,7 +134,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
    };
    int numIterations = atLeast(50);
    for (int i = 0; i < numIterations; i++) {
-      String s = _TestUtil.randomHtmlishString(random, maxLength);
+      String s = TestUtil.randomHtmlishString(random, maxLength);
      assertEquals(s, left.tokenStream("foo", newStringReader(s)),
                   right.tokenStream("foo", newStringReader(s)));
    }
@@ -151,7 +151,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
      }
    };
    for (int i = 0; i < 1000; i++) {
-      String s = _TestUtil.randomUnicodeString(random);
+      String s = TestUtil.randomUnicodeString(random);
      assertEquals(s, left.tokenStream("foo", newStringReader(s)),
                   right.tokenStream("foo", newStringReader(s)));
    }
@@ -171,7 +171,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
    };
    int numIterations = atLeast(50);
    for (int i = 0; i < numIterations; i++) {
-      String s = _TestUtil.randomUnicodeString(random, maxLength);
+      String s = TestUtil.randomUnicodeString(random, maxLength);
      assertEquals(s, left.tokenStream("foo", newStringReader(s)),
                   right.tokenStream("foo", newStringReader(s)));
    }

@@ -37,7 +37,7 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
 
@@ -97,21 +97,21 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
    writer.close();
 
    IndexReader reader = DirectoryReader.open(dir);
-    DocsEnum td = _TestUtil.docs(random(),
-                                 reader,
-                                 "partnum",
-                                 new BytesRef("Q36"),
-                                 MultiFields.getLiveDocs(reader),
-                                 null,
-                                 0);
+    DocsEnum td = TestUtil.docs(random(),
+                                reader,
+                                "partnum",
+                                new BytesRef("Q36"),
+                                MultiFields.getLiveDocs(reader),
+                                null,
+                                0);
    assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    td = _TestUtil.docs(random(),
-                        reader,
-                        "partnum",
-                        new BytesRef("Q37"),
-                        MultiFields.getLiveDocs(reader),
-                        null,
-                        0);
+    td = TestUtil.docs(random(),
+                       reader,
+                       "partnum",
+                       new BytesRef("Q37"),
+                       MultiFields.getLiveDocs(reader),
+                       null,
+                       0);
    assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  }
 

@@ -85,8 +85,8 @@ import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.AttributeSource.AttributeFactory;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.Rethrow;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.Version;
-import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -305,7 +305,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
        // TODO: could cause huge ram usage to use full int range for some filters
        // (e.g. allocate enormous arrays)
        // return Integer.valueOf(random.nextInt());
-        return Integer.valueOf(_TestUtil.nextInt(random, -100, 100));
+        return Integer.valueOf(TestUtil.nextInt(random, -100, 100));
      }
    });
    put(char.class, new ArgProducer() {
@@ -372,7 +372,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
        Collection<char[]> col = new ArrayList<char[]>();
        int num = random.nextInt(5);
        for (int i = 0; i < num; i++) {
-          col.add(_TestUtil.randomSimpleString(random).toCharArray());
+          col.add(TestUtil.randomSimpleString(random).toCharArray());
        }
        return col;
      }
@@ -383,7 +383,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
        CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, num, random.nextBoolean());
        for (int i = 0; i < num; i++) {
          // TODO: make nastier
-          set.add(_TestUtil.randomSimpleString(random));
+          set.add(TestUtil.randomSimpleString(random));
        }
        return set;
      }
@@ -451,7 +451,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
          // a token type
          return StandardTokenizer.TOKEN_TYPES[random.nextInt(StandardTokenizer.TOKEN_TYPES.length)];
        } else {
-          return _TestUtil.randomSimpleString(random);
+          return TestUtil.randomSimpleString(random);
        }
      }
    });
@@ -463,9 +463,9 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
        int num = random.nextInt(5);
        //System.out.println("NormalizeCharMap=");
        for (int i = 0; i < num; i++) {
-          String key = _TestUtil.randomSimpleString(random);
+          String key = TestUtil.randomSimpleString(random);
          if (!keys.contains(key) && key.length() > 0) {
-            String value = _TestUtil.randomSimpleString(random);
+            String value = TestUtil.randomSimpleString(random);
            builder.add(key, value);
            keys.add(key);
            //System.out.println("mapping: '" + key + "' => '" + value + "'");
@@ -492,7 +492,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
        CharArrayMap<String> map = new CharArrayMap<String>(TEST_VERSION_CURRENT, num, random.nextBoolean());
        for (int i = 0; i < num; i++) {
          // TODO: make nastier
-          map.put(_TestUtil.randomSimpleString(random), _TestUtil.randomSimpleString(random));
+          map.put(TestUtil.randomSimpleString(random), TestUtil.randomSimpleString(random));
        }
        return map;
      }
@@ -504,11 +504,11 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
        for (int i = 0; i < num; i++) {
          String input = "";
          do {
-            input = _TestUtil.randomRealisticUnicodeString(random);
+            input = TestUtil.randomRealisticUnicodeString(random);
          } while(input.isEmpty());
-          String out = ""; _TestUtil.randomSimpleString(random);
+          String out = ""; TestUtil.randomSimpleString(random);
          do {
-            out = _TestUtil.randomRealisticUnicodeString(random);
+            out = TestUtil.randomRealisticUnicodeString(random);
          } while(out.isEmpty());
          builder.add(input, out);
        }
@@ -543,7 +543,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
 
  private String randomNonEmptyString(Random random) {
    while(true) {
-      final String s = _TestUtil.randomUnicodeString(random).trim();
+      final String s = TestUtil.randomUnicodeString(random).trim();
      if (s.length() != 0 && s.indexOf('\u0000') == -1) {
        return s;
      }

@@ -28,7 +28,7 @@ import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
 import org.apache.lucene.analysis.util.CharArraySet;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
@@ -56,13 +56,13 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase {
  public void testKeywordAttribute() throws IOException {
    MockTokenizer tokenizer = whitespaceMockTokenizer("lucene is awesome");
    tokenizer.setEnableChecks(true);
-    HunspellStemFilter filter = new HunspellStemFilter(tokenizer, DICTIONARY, _TestUtil.nextInt(random(), 1, 3));
+    HunspellStemFilter filter = new HunspellStemFilter(tokenizer, DICTIONARY, TestUtil.nextInt(random(), 1, 3));
    assertTokenStreamContents(filter, new String[]{"lucene", "lucen", "is", "awesome"}, new int[] {1, 0, 1, 1});
 
    // assert with keywork marker
    tokenizer = whitespaceMockTokenizer("lucene is awesome");
    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList("Lucene"), true);
-    filter = new HunspellStemFilter(new SetKeywordMarkerFilter(tokenizer, set), DICTIONARY, _TestUtil.nextInt(random(), 1, 3));
+    filter = new HunspellStemFilter(new SetKeywordMarkerFilter(tokenizer, set), DICTIONARY, TestUtil.nextInt(random(), 1, 3));
    assertTokenStreamContents(filter, new String[]{"lucene", "is", "awesome"}, new int[] {1, 1, 1});
  }
 
@@ -73,7 +73,7 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase {
      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-        return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY, _TestUtil.nextInt(random(), 1, 3)));
+        return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY, TestUtil.nextInt(random(), 1, 3)));
      }
    };
    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
@@ -84,7 +84,7 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase {
      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer tokenizer = new KeywordTokenizer();
-        return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY, _TestUtil.nextInt(random(), 1, 3)));
+        return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY, TestUtil.nextInt(random(), 1, 3)));
      }
    };
    checkOneTerm(a, "", "");

@@ -18,16 +18,15 @@ package org.apache.lucene.analysis.miscellaneous;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestCodepointCountFilter extends BaseTokenStreamTestCase {
  public void testFilterWithPosIncr() throws Exception {
@@ -52,9 +51,9 @@ public class TestCodepointCountFilter extends BaseTokenStreamTestCase {
 
  public void testRandomStrings() throws IOException {
    for (int i = 0; i < 10000; i++) {
-      String text = _TestUtil.randomUnicodeString(random(), 100);
-      int min = _TestUtil.nextInt(random(), 0, 100);
-      int max = _TestUtil.nextInt(random(), 0, 100);
+      String text = TestUtil.randomUnicodeString(random(), 100);
+      int min = TestUtil.nextInt(random(), 0, 100);
+      int max = TestUtil.nextInt(random(), 0, 100);
      int count = text.codePointCount(0, text.length());
      boolean expected = count >= min && count <= max;
      TokenStream stream = new KeywordTokenizer();

@@ -30,7 +30,7 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestLimitTokenCountAnalyzer extends BaseTokenStreamTestCase {
 
@@ -59,7 +59,7 @@ public class TestLimitTokenCountAnalyzer extends BaseTokenStreamTestCase {
 
    for (boolean consumeAll : new boolean[] { true, false }) {
      Directory dir = newDirectory();
-      int limit = _TestUtil.nextInt(random(), 50, 101000);
+      int limit = TestUtil.nextInt(random(), 50, 101000);
      MockAnalyzer mock = new MockAnalyzer(random());
 
      // if we are consuming all tokens, we can use the checks,

@@ -30,10 +30,9 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.util.CharsRef;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 import java.io.IOException;
-import java.io.Reader;
 import java.util.Iterator;
 import java.util.Arrays;
 
@@ -129,7 +128,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase {
  // some helper methods for the below test with synonyms
  private String randomNonEmptyString() {
    while(true) {
-      final String s = _TestUtil.randomUnicodeString(random()).trim();
+      final String s = TestUtil.randomUnicodeString(random()).trim();
      if (s.length() != 0 && s.indexOf('\u0000') == -1) {
        return s;
      }

@@ -31,7 +31,7 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.en.PorterStemFilter;
 import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter.StemmerOverrideMap;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 /**
  *
@@ -81,7 +81,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
    Map<String,String> map = new HashMap<String,String>();
    int numTerms = atLeast(50);
    for (int i = 0; i < numTerms; i++) {
-      String randomRealisticUnicodeString = _TestUtil
+      String randomRealisticUnicodeString = TestUtil
          .randomRealisticUnicodeString(random());
      char[] charArray = randomRealisticUnicodeString.toCharArray();
      StringBuilder builder = new StringBuilder();
@@ -93,7 +93,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
        j += Character.charCount(cp);
      }
      if (builder.length() > 0) {
-        String value = _TestUtil.randomSimpleString(random());
+        String value = TestUtil.randomSimpleString(random());
        map.put(builder.toString(),
            value.isEmpty() ? "a" : value);
 
@@ -124,10 +124,10 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
    Map<String,String> map = new HashMap<String,String>();
    int numTerms = atLeast(50);
    for (int i = 0; i < numTerms; i++) {
-      String randomRealisticUnicodeString = _TestUtil
+      String randomRealisticUnicodeString = TestUtil
          .randomRealisticUnicodeString(random());
      if (randomRealisticUnicodeString.length() > 0) {
-        String value = _TestUtil.randomSimpleString(random());
+        String value = TestUtil.randomSimpleString(random());
        map.put(randomRealisticUnicodeString,
            value.isEmpty() ? "a" : value);
      }

@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.ngram;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.Random;
 
@@ -35,7 +34,8 @@ import org.apache.lucene.analysis.shingle.ShingleFilter;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 /**
  * Tests {@link EdgeNGramTokenFilter} for correctness.
@@ -171,8 +171,8 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
  /** blast some random strings through the analyzer */
  public void testRandomStrings() throws Exception {
    for (int i = 0; i < 10; i++) {
-      final int min = _TestUtil.nextInt(random(), 2, 10);
-      final int max = _TestUtil.nextInt(random(), min, 20);
+      final int min = TestUtil.nextInt(random(), 2, 10);
+      final int max = TestUtil.nextInt(random(), min, 20);
 
      Analyzer a = new Analyzer() {
        @Override
@@ -215,10 +215,10 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
  }
 
  public void testSupplementaryCharacters() throws IOException {
-    final String s = _TestUtil.randomUnicodeString(random(), 10);
+    final String s = TestUtil.randomUnicodeString(random(), 10);
    final int codePointCount = s.codePointCount(0, s.length());
-    final int minGram = _TestUtil.nextInt(random(), 1, 3);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 10);
+    final int minGram = TestUtil.nextInt(random(), 1, 3);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 10);
    TokenStream tk = new KeywordTokenizer();
    ((Tokenizer)tk).setReader(new StringReader(s));
    tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);

@@ -19,14 +19,13 @@ package org.apache.lucene.analysis.ngram;
 
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.Arrays;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 
@@ -101,8 +100,8 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
  /** blast some random strings through the analyzer */
  public void testRandomStrings() throws Exception {
    for (int i = 0; i < 10; i++) {
-      final int min = _TestUtil.nextInt(random(), 2, 10);
-      final int max = _TestUtil.nextInt(random(), min, 20);
+      final int min = TestUtil.nextInt(random(), 2, 10);
+      final int max = TestUtil.nextInt(random(), min, 20);
 
      Analyzer a = new Analyzer() {
        @Override
@@ -141,47 +140,47 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
 
  public void testLargeInput() throws IOException {
    // test sliding
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
  }
 
  public void testLargeMaxGram() throws IOException {
    // test sliding with maxGram > 1024
-    final int minGram = _TestUtil.nextInt(random(), 1290, 1300);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 1300);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
+    final int minGram = TestUtil.nextInt(random(), 1290, 1300);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 1300);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
  }
 
  public void testPreTokenization() throws IOException {
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "a");
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "a");
  }
 
  public void testHeavyPreTokenization() throws IOException {
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef");
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef");
  }
 
  public void testFewTokenChars() throws IOException {
-    final char[] chrs = new char[_TestUtil.nextInt(random(), 4000, 5000)];
+    final char[] chrs = new char[TestUtil.nextInt(random(), 4000, 5000)];
    Arrays.fill(chrs, ' ');
    for (int i = 0; i < chrs.length; ++i) {
      if (random().nextFloat() < 0.1) {
        chrs[i] = 'a';
      }
    }
-    final int minGram = _TestUtil.nextInt(random(), 1, 2);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 2);
+    final int minGram = TestUtil.nextInt(random(), 1, 2);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 2);
    testNGrams(minGram, maxGram, new String(chrs), " ");
  }
 
  public void testFullUTF8Range() throws IOException {
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    final String s = _TestUtil.randomUnicodeString(random(), 4 * 1024);
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    final String s = TestUtil.randomUnicodeString(random(), 4 * 1024);
    testNGrams(minGram, maxGram, s, "");
    testNGrams(minGram, maxGram, s, "abcdef");
  }

@@ -28,11 +28,10 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.Version;
-import org.apache.lucene.util._TestUtil;
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.Random;
 
@@ -146,8 +145,8 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
  /** blast some random strings through the analyzer */
  public void testRandomStrings() throws Exception {
    for (int i = 0; i < 10; i++) {
-      final int min = _TestUtil.nextInt(random(), 2, 10);
-      final int max = _TestUtil.nextInt(random(), min, 20);
+      final int min = TestUtil.nextInt(random(), 2, 10);
+      final int max = TestUtil.nextInt(random(), min, 20);
      Analyzer a = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
@@ -186,10 +185,10 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
  }
 
  public void testSupplementaryCharacters() throws IOException {
-    final String s = _TestUtil.randomUnicodeString(random(), 10);
+    final String s = TestUtil.randomUnicodeString(random(), 10);
    final int codePointCount = s.codePointCount(0, s.length());
-    final int minGram = _TestUtil.nextInt(random(), 1, 3);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 10);
+    final int minGram = TestUtil.nextInt(random(), 1, 3);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 10);
    TokenStream tk = new KeywordTokenizer();
    ((Tokenizer)tk).setReader(new StringReader(s));
    tk = new NGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);

@@ -18,22 +18,18 @@ package org.apache.lucene.analysis.ngram;
  */
-
-
-import static org.apache.lucene.analysis.ngram.NGramTokenizerTest.isTokenChar;
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.Arrays;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 
@@ -115,8 +111,8 @@ public class NGramTokenizerTest extends BaseTokenStreamTestCase {
  /** blast some random strings through the analyzer */
  public void testRandomStrings() throws Exception {
    for (int i = 0; i < 10; i++) {
-      final int min = _TestUtil.nextInt(random(), 2, 10);
-      final int max = _TestUtil.nextInt(random(), min, 20);
+      final int min = TestUtil.nextInt(random(), 2, 10);
+      final int max = TestUtil.nextInt(random(), min, 20);
      Analyzer a = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
@@ -205,47 +201,47 @@ public class NGramTokenizerTest extends BaseTokenStreamTestCase {
 
  public void testLargeInput() throws IOException {
    // test sliding
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
  }
 
  public void testLargeMaxGram() throws IOException {
    // test sliding with maxGram > 1024
-    final int minGram = _TestUtil.nextInt(random(), 1290, 1300);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 1300);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
+    final int minGram = TestUtil.nextInt(random(), 1290, 1300);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 1300);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
  }
 
  public void testPreTokenization() throws IOException {
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "a");
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "a");
  }
 
  public void testHeavyPreTokenization() throws IOException {
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef");
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef");
  }
 
  public void testFewTokenChars() throws IOException {
-    final char[] chrs = new char[_TestUtil.nextInt(random(), 4000, 5000)];
+    final char[] chrs = new char[TestUtil.nextInt(random(), 4000, 5000)];
    Arrays.fill(chrs, ' ');
    for (int i = 0; i < chrs.length; ++i) {
      if (random().nextFloat() < 0.1) {
        chrs[i] = 'a';
      }
    }
-    final int minGram = _TestUtil.nextInt(random(), 1, 2);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 2);
+    final int minGram = TestUtil.nextInt(random(), 1, 2);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 2);
    testNGrams(minGram, maxGram, new String(chrs), " ");
  }
 
  public void testFullUTF8Range() throws IOException {
-    final int minGram = _TestUtil.nextInt(random(), 1, 100);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    final String s = _TestUtil.randomUnicodeString(random(), 4 * 1024);
+    final int minGram = TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = TestUtil.nextInt(random(), minGram, 100);
+    final String s = TestUtil.randomUnicodeString(random(), 4 * 1024);
    testNGrams(minGram, maxGram, s, "");
    testNGrams(minGram, maxGram, s, "abcdef");
  }

@@ -30,7 +30,7 @@ import org.apache.lucene.analysis.CharFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.Ignore;
 
 /**
@@ -302,9 +302,9 @@ public class TestPatternReplaceCharFilter extends BaseTokenStreamTestCase {
    int numPatterns = 10 + random().nextInt(20);
    Random random = new Random(random().nextLong());
    for (int i = 0; i < numPatterns; i++) {
-      final Pattern p = _TestUtil.randomPattern(random());
+      final Pattern p = TestUtil.randomPattern(random());
 
-      final String replacement = _TestUtil.randomSimpleString(random);
+      final String replacement = TestUtil.randomSimpleString(random);
      Analyzer a = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {

@@ -18,7 +18,6 @@
 package org.apache.lucene.analysis.synonym;
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -39,7 +38,7 @@ import org.apache.lucene.analysis.MockGraphTokenFilter;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.tokenattributes.*;
 import org.apache.lucene.util.CharsRef;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
 
@@ -383,7 +382,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
 
  public void testRandom() throws Exception {
 
-    final int alphabetSize = _TestUtil.nextInt(random(), 2, 7);
+    final int alphabetSize = TestUtil.nextInt(random(), 2, 7);
 
    final int docLen = atLeast(3000);
    //final int docLen = 50;
@@ -405,7 +404,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
      }
      b = new SynonymMap.Builder(dedup);
      for(int synIDX=0;synIDX<numSyn;synIDX++) {
-        final String synIn = getRandomString('a', alphabetSize, _TestUtil.nextInt(random(), 1, 5)).trim();
+        final String synIn = getRandomString('a', alphabetSize, TestUtil.nextInt(random(), 1, 5)).trim();
        OneSyn s = synMap.get(synIn);
        if (s == null) {
          s = new OneSyn();
@@ -415,7 +414,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
          synMap.put(synIn, s);
          s.keepOrig = random().nextBoolean();
        }
-        final String synOut = getRandomString('0', 10, _TestUtil.nextInt(random(), 1, 5)).trim();
+        final String synOut = getRandomString('0', 10, TestUtil.nextInt(random(), 1, 5)).trim();
        s.out.add(synOut);
        add(synIn, synOut, s.keepOrig);
        if (VERBOSE) {
@@ -472,7 +471,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
 
  private String randomNonEmptyString() {
    while(true) {
-      final String s = _TestUtil.randomUnicodeString(random()).trim();
+      final String s = TestUtil.randomUnicodeString(random()).trim();
      if (s.length() != 0 && s.indexOf('\u0000') == -1) {
        return s;
      }

@@ -22,8 +22,7 @@ import java.text.CharacterIterator;
 import java.util.Locale;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestCharArrayIterator extends LuceneTestCase {
 
@@ -36,7 +35,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
    BreakIterator bi = BreakIterator.getWordInstance(Locale.getDefault());
    CharArrayIterator ci = CharArrayIterator.newWordInstance();
    for (int i = 0; i < 10000; i++) {
-      char text[] = _TestUtil.randomUnicodeString(random()).toCharArray();
+      char text[] = TestUtil.randomUnicodeString(random()).toCharArray();
      ci.setText(text, 0, text.length);
      consume(bi, ci);
    }
@@ -66,7 +65,7 @@ public class TestCharArrayIterator extends LuceneTestCase {
    BreakIterator bi = BreakIterator.getSentenceInstance(Locale.getDefault());
    CharArrayIterator ci = CharArrayIterator.newSentenceInstance();
    for (int i = 0; i < 10000; i++) {
-      char text[] = _TestUtil.randomUnicodeString(random()).toCharArray();
+      char text[] = TestUtil.randomUnicodeString(random()).toCharArray();
      ci.setText(text, 0, text.length);
      consume(bi, ci);
    }

@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.util;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.Locale;
 
@@ -29,7 +28,7 @@ import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.apache.lucene.analysis.core.LowerCaseTokenizer;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 
 /**
@@ -126,7 +125,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
    };
    int num = 1000 * RANDOM_MULTIPLIER;
    for (int i = 0; i < num; i++) {
-      String s = _TestUtil.randomUnicodeString(random());
+      String s = TestUtil.randomUnicodeString(random());
      try (TokenStream ts = analyzer.tokenStream("foo", s)) {
        ts.reset();
        OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
@@ -164,7 +163,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
    };
    int num = 1000 * RANDOM_MULTIPLIER;
    for (int i = 0; i < num; i++) {
-      String s = _TestUtil.randomUnicodeString(random());
+      String s = TestUtil.randomUnicodeString(random());
      try (TokenStream ts = analyzer.tokenStream("foo", s)) {
        ts.reset();
        OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);

@@ -24,7 +24,7 @@ import java.util.Arrays;
 
 import org.apache.lucene.analysis.util.CharacterUtils.CharacterBuffer;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
 
 /**
@@ -79,7 +79,7 @@ public class TestCharacterUtils extends LuceneTestCase {
  public void testCodePointCount() {
    CharacterUtils java4 = CharacterUtils.getJava4Instance();
    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
-    final String s = _TestUtil.randomUnicodeString(random());
+    final String s = TestUtil.randomUnicodeString(random());
    assertEquals(s.length(), java4.codePointCount(s));
    assertEquals(Character.codePointCount(s, 0, s.length()), java5.codePointCount(s));
  }
@@ -89,8 +89,8 @@ public class TestCharacterUtils extends LuceneTestCase {
    CharacterUtils java4 = CharacterUtils.getJava4Instance();
    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
    for (int i = 0; i < 10; ++i) {
-      final char[] s = _TestUtil.randomUnicodeString(random()).toCharArray();
-      final int index = _TestUtil.nextInt(random(), 0, s.length);
+      final char[] s = TestUtil.randomUnicodeString(random()).toCharArray();
+      final int index = TestUtil.nextInt(random(), 0, s.length);
      final int offset = random().nextInt(7) - 3;
      try {
        final int o = java4.offsetByCodePoints(s, 0, s.length, index, offset);
@@ -125,12 +125,12 @@ public class TestCharacterUtils extends LuceneTestCase {
  }
 
  private void testConversions(CharacterUtils charUtils) {
-    final char[] orig = _TestUtil.randomUnicodeString(random(), 100).toCharArray();
+    final char[] orig = TestUtil.randomUnicodeString(random(), 100).toCharArray();
    final int[] buf = new int[orig.length];
    final char[] restored = new char[buf.length];
-    final int o1 = _TestUtil.nextInt(random(), 0, Math.min(5, orig.length));
-    final int o2 = _TestUtil.nextInt(random(), 0, o1);
-    final int o3 = _TestUtil.nextInt(random(), 0, o1);
+    final int o1 = TestUtil.nextInt(random(), 0, Math.min(5, orig.length));
+    final int o2 = TestUtil.nextInt(random(), 0, o1);
+    final int o3 = TestUtil.nextInt(random(), 0, o1);
    final int codePointCount = charUtils.toCodePoints(orig, o1, orig.length - o1, buf, o2);
    final int charCount = charUtils.toChars(buf, o2, codePointCount, restored, o3);
    assertEquals(orig.length - o1, charCount);

@@ -26,7 +26,8 @@ import java.io.Writer;
 
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestFilesystemResourceLoader extends LuceneTestCase {
 
@@ -60,7 +61,7 @@ public class TestFilesystemResourceLoader extends LuceneTestCase {
  }
 
  public void testBaseDir() throws Exception {
-    final File base = _TestUtil.getTempDir("fsResourceLoaderBase").getAbsoluteFile();
+    final File base = TestUtil.getTempDir("fsResourceLoaderBase").getAbsoluteFile();
    try {
      base.mkdirs();
      Writer os = new OutputStreamWriter(new FileOutputStream(new File(base, "template.txt")), IOUtils.CHARSET_UTF_8);
@@ -86,7 +87,7 @@ public class TestFilesystemResourceLoader extends LuceneTestCase {
      assertClasspathDelegation(rl);
      assertNotFound(rl);
    } finally {
-      _TestUtil.rmDir(base);
+      TestUtil.rmDir(base);
    }
  }
 

@@ -21,7 +21,7 @@ import java.io.StringReader;
 import java.util.Random;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestRollingCharBuffer extends LuceneTestCase {
 
@@ -37,7 +37,7 @@ public class TestRollingCharBuffer extends LuceneTestCase {
      if (stringLen == 0) {
        s = "";
      } else {
-        s = _TestUtil.randomUnicodeString(random, stringLen);
+        s = TestUtil.randomUnicodeString(random, stringLen);
      }
      if (VERBOSE) {
        System.out.println("\nTEST: iter=" + iter + " s.length()=" + s.length());
@@ -59,7 +59,7 @@ public class TestRollingCharBuffer extends LuceneTestCase {
          availCount++;
        } else if (random.nextBoolean()) {
          // Read previous char
-          int pos = _TestUtil.nextInt(random, nextRead-availCount, nextRead-1);
+          int pos = TestUtil.nextInt(random, nextRead - availCount, nextRead - 1);
          if (VERBOSE) {
            System.out.println(" old char pos=" + pos);
          }
@@ -70,7 +70,7 @@ public class TestRollingCharBuffer extends LuceneTestCase {
          if (availCount == 1) {
            length = 1;
          } else {
-            length = _TestUtil.nextInt(random, 1, availCount);
+            length = TestUtil.nextInt(random, 1, availCount);
          }
          int start;
          if (length == availCount) {

@@ -37,7 +37,7 @@ import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 
 import com.ibm.icu.text.Collator;
@@ -95,7 +95,7 @@ public class TestICUCollationDocValuesField extends LuceneTestCase {
 
    int numDocs = atLeast(500);
    for (int i = 0; i < numDocs; i++) {
-      String value = _TestUtil.randomSimpleString(random());
+      String value = TestUtil.randomSimpleString(random());
      field.setStringValue(value);
      collationField.setStringValue(value);
      iw.addDocument(doc);
@@ -107,8 +107,8 @@ public class TestICUCollationDocValuesField extends LuceneTestCase {
 
    int numChecks = atLeast(100);
    for (int i = 0; i < numChecks; i++) {
-      String start = _TestUtil.randomSimpleString(random());
-      String end = _TestUtil.randomSimpleString(random());
+      String start = TestUtil.randomSimpleString(random());
+      String end = TestUtil.randomSimpleString(random());
      BytesRef lowerVal = new BytesRef(collator.getCollationKey(start).toByteArray());
      BytesRef upperVal = new BytesRef(collator.getCollationKey(end).toByteArray());
      Query query = new ConstantScoreQuery(FieldCacheRangeFilter.newBytesRefRange("collated", lowerVal, upperVal, true, true));

@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.ja;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -27,8 +26,8 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 
 @Slow
@@ -52,7 +51,7 @@ public class TestExtendedMode extends BaseTokenStreamTestCase {
  public void testSurrogates2() throws IOException {
    int numIterations = atLeast(1000);
    for (int i = 0; i < numIterations; i++) {
-      String s = _TestUtil.randomUnicodeString(random(), 100);
+      String s = TestUtil.randomUnicodeString(random(), 100);
      try (TokenStream ts = analyzer.tokenStream("foo", s)) {
        CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
        ts.reset();

@@ -35,8 +35,8 @@ import org.apache.lucene.analysis.ja.dict.UserDictionary;
 import org.apache.lucene.analysis.ja.tokenattributes.*;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 
 @Slow
@@ -212,7 +212,7 @@ public class TestJapaneseTokenizer extends BaseTokenStreamTestCase {
 
  public void testLargeDocReliability() throws Exception {
    for (int i = 0; i < 100; i++) {
-      String s = _TestUtil.randomUnicodeString(random(), 10000);
+      String s = TestUtil.randomUnicodeString(random(), 10000);
      try (TokenStream ts = analyzer.tokenStream("foo", s)) {
        ts.reset();
        while (ts.incrementToken()) {
@@ -235,7 +235,7 @@ public class TestJapaneseTokenizer extends BaseTokenStreamTestCase {
      if (VERBOSE) {
        System.out.println("\nTEST: iter=" + i);
      }
-      String s = _TestUtil.randomUnicodeString(random(), 100);
+      String s = TestUtil.randomUnicodeString(random(), 100);
      try (TokenStream ts = analyzer.tokenStream("foo", s)) {
        CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
        ts.reset();

@@ -17,7 +17,6 @@
 package org.apache.lucene.analysis.phonetic;
 
 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -27,7 +26,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
 
@@ -79,7 +78,7 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
  }
 
  public void testRandom() throws Exception {
-    final int codeLen = _TestUtil.nextInt(random(), 1, 8);
+    final int codeLen = TestUtil.nextInt(random(), 1, 8);
    Analyzer a = new Analyzer() {
 
      @Override

@@ -65,18 +65,17 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.LineNumberReader;
-import java.net.URI;
 import java.util.Locale;
 import java.util.StringTokenizer;
 
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 
 public class TestCompile extends LuceneTestCase {
 
  public void testCompile() throws Exception {
-    File dir = _TestUtil.getTempDir("testCompile");
+    File dir = TestUtil.getTempDir("testCompile");
    dir.mkdirs();
    InputStream input = getClass().getResourceAsStream("testRules.txt");
    File output = new File(dir, "testRules.txt");
@@ -92,7 +91,7 @@ public class TestCompile extends LuceneTestCase {
  }
 
  public void testCompileBackwards() throws Exception {
-    File dir = _TestUtil.getTempDir("testCompile");
+    File dir = TestUtil.getTempDir("testCompile");
    dir.mkdirs();
    InputStream input = getClass().getResourceAsStream("testRules.txt");
    File output = new File(dir, "testRules.txt");
@@ -108,7 +107,7 @@ public class TestCompile extends LuceneTestCase {
  }
 
  public void testCompileMulti() throws Exception {
-    File dir = _TestUtil.getTempDir("testCompile");
+    File dir = TestUtil.getTempDir("testCompile");
    dir.mkdirs();
    InputStream input = getClass().getResourceAsStream("testRules.txt");
    File output = new File(dir, "testRules.txt");

@@ -26,7 +26,7 @@ import java.io.StringReader;
 
 import org.apache.lucene.benchmark.byTask.Benchmark;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
@@ -36,7 +36,7 @@ public abstract class BenchmarkTestCase extends LuceneTestCase {
 
  @BeforeClass
  public static void beforeClassBenchmarkTestCase() {
-    WORKDIR = _TestUtil.getTempDir("benchmark");
+    WORKDIR = TestUtil.getTempDir("benchmark");
    WORKDIR.delete();
    WORKDIR.mkdirs();
  }

@ -48,7 +48,6 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
|||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.LogDocMergePolicy;
|
||||
import org.apache.lucene.index.LogMergePolicy;
|
||||
import org.apache.lucene.index.MergePolicy;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.SegmentInfos;
|
||||
import org.apache.lucene.index.SerialMergeScheduler;
|
||||
|
@ -60,7 +59,8 @@ import org.apache.lucene.search.DocIdSetIterator;
|
|||
import org.apache.lucene.search.FieldCache;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
/**
|
||||
* Test very simply that perf tasks - simple algorithms - are doing what they should.
|
||||
|
@ -505,7 +505,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
|
|||
TermsEnum termsEnum = terms.iterator(null);
|
||||
DocsEnum docs = null;
|
||||
while(termsEnum.next() != null) {
|
||||
docs = _TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, DocsEnum.FLAG_FREQS);
|
||||
docs = TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, DocsEnum.FLAG_FREQS);
|
||||
while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
|
||||
totalTokenCount2 += docs.freq();
|
||||
}
|
||||
|
|
|
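The TestPerfTasksLogic hunk above iterates postings through TestUtil.docs, which returns a DocsEnum for the current term while randomly varying the concrete enum path exercised. Below is a self-contained sketch of the same loop against a tiny single-document index; everything apart from the TestUtil.docs call is ordinary Lucene 4.x API, and all names are illustrative:

    import java.util.Random;

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.DocsEnum;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.MultiFields;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.TestUtil;
    import org.apache.lucene.util.Version;

    public class DocsEnumSketch {
      public static void main(String[] args) throws Exception {
        Random random = new Random(0);
        RAMDirectory dir = new RAMDirectory();
        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(
            Version.LUCENE_CURRENT, new MockAnalyzer(random)));
        Document doc = new Document();
        doc.add(new TextField("body", "hello hello world", Field.Store.NO));
        iw.addDocument(doc);
        iw.close();

        DirectoryReader reader = DirectoryReader.open(dir);
        TermsEnum termsEnum = MultiFields.getTerms(reader, "body").iterator(null);
        DocsEnum docs = null;
        while (termsEnum.next() != null) {
          // Same call shape as the hunk above: live docs, a reusable enum,
          // and FLAG_FREQS so freq() is legal inside the loop.
          docs = TestUtil.docs(random, termsEnum,
              MultiFields.getLiveDocs(reader), docs, DocsEnum.FLAG_FREQS);
          while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            System.out.println(termsEnum.term().utf8ToString() + " freq=" + docs.freq());
          }
        }
        reader.close();
        dir.close();
      }
    }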
@@ -31,7 +31,8 @@ import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser.ParsePathType;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

public class TrecContentSourceTest extends LuceneTestCase {

@@ -343,8 +344,8 @@ public class TrecContentSourceTest extends LuceneTestCase {
* supported formats - bzip, gzip, txt.
*/
public void testTrecFeedDirAllTypes() throws Exception {
File dataDir = _TestUtil.getTempDir("trecFeedAllTypes");
_TestUtil.unzip(getDataFile("trecdocs.zip"), dataDir);
File dataDir = TestUtil.getTempDir("trecFeedAllTypes");
TestUtil.unzip(getDataFile("trecdocs.zip"), dataDir);
TrecContentSource tcs = new TrecContentSource();
Properties props = new Properties();
props.setProperty("print.props", "false");
@@ -30,7 +30,8 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.BeforeClass;

/** Tests the functionality of {@link AddIndexesTask}. */

@@ -40,7 +41,7 @@ public class AddIndexesTaskTest extends BenchmarkTestCase {
@BeforeClass
public static void beforeClassAddIndexesTaskTest() throws Exception {
testDir = _TestUtil.getTempDir("addIndexesTask");
testDir = TestUtil.getTempDir("addIndexesTask");
// create a dummy index under inputDir
inputDir = new File(testDir, "input");
@@ -21,7 +21,6 @@ import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

@@ -30,9 +29,8 @@ import java.io.OutputStreamWriter;
import org.apache.commons.compress.compressors.CompressorStreamFactory;
import org.apache.lucene.benchmark.BenchmarkTestCase;
import org.apache.lucene.benchmark.byTask.utils.StreamUtils;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

@@ -138,14 +136,14 @@ public class StreamUtilsTest extends BenchmarkTestCase {
public void setUp() throws Exception {
super.setUp();
testDir = new File(getWorkDir(),"ContentSourceTest");
_TestUtil.rmDir(testDir);
TestUtil.rmDir(testDir);
assertTrue(testDir.mkdirs());
}
@Override
@After
public void tearDown() throws Exception {
_TestUtil.rmDir(testDir);
TestUtil.rmDir(testDir);
super.tearDown();
}
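StreamUtilsTest's setUp/tearDown pair is the standard cleanup idiom around TestUtil.rmDir, which recursively deletes a directory and does nothing if it does not exist. A hedged, self-contained sketch of the same lifecycle; the test class name and directory label are invented:

    import java.io.File;

    import org.apache.lucene.util.LuceneTestCase;
    import org.apache.lucene.util.TestUtil;

    public class ScratchDirLifecycleTest extends LuceneTestCase {
      private File testDir;

      @Override
      public void setUp() throws Exception {
        super.setUp();
        testDir = new File(TestUtil.getTempDir("scratch"), "ScratchDirLifecycleTest");
        TestUtil.rmDir(testDir);   // start from a clean slate
        assertTrue(testDir.mkdirs());
      }

      @Override
      public void tearDown() throws Exception {
        TestUtil.rmDir(testDir);   // leave nothing behind
        super.tearDown();
      }

      public void testScratchDirIsUsable() {
        assertTrue(testDir.isDirectory());
      }
    }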
@@ -28,7 +28,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.After;
import org.junit.Before;

@@ -238,7 +238,7 @@ public abstract class ClassificationTestBase<T> extends LuceneTestCase {
private String createRandomString(Random random) {
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 20; i++) {
builder.append(_TestUtil.randomSimpleString(random, 5));
builder.append(TestUtil.randomSimpleString(random, 5));
builder.append(" ");
}
return builder.toString();
@@ -30,7 +30,7 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.After;
import org.junit.Before;

@@ -71,8 +71,8 @@ public class DataSplitterTest extends LuceneTestCase {
for (int i = 0; i < 100; i++) {
doc = new Document();
doc.add(new Field(idFieldName, Integer.toString(i), ft));
doc.add(new Field(textFieldName, _TestUtil.randomUnicodeString(rnd, 1024), ft));
doc.add(new Field(classFieldName, _TestUtil.randomUnicodeString(rnd, 10), ft));
doc.add(new Field(textFieldName, TestUtil.randomUnicodeString(rnd, 1024), ft));
doc.add(new Field(classFieldName, TestUtil.randomUnicodeString(rnd, 10), ft));
indexWriter.addDocument(doc, analyzer);
}
@@ -20,13 +20,14 @@ package org.apache.lucene.codecs.blockterms;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene41ords.Lucene41WithOrds;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Basic tests of a PF using FixedGap terms dictionary
*/
public class TestFixedGapPostingsFormat extends BasePostingsFormatTestCase {
private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41WithOrds(_TestUtil.nextInt(random(), 1, 1000)));
private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41WithOrds(TestUtil.nextInt(random(), 1, 1000)));
@Override
protected Codec getCodec() {
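The run of codec test files that follows repeats one idiom: TestUtil.alwaysPostingsFormat (and its sibling alwaysDocValuesFormat) wraps a single format in a Codec so that every field is written with it, which is how each Base*TestCase subclass pins the format under test. A sketch, with the format name taken from the hunks and the class name invented:

    import org.apache.lucene.codecs.Codec;
    import org.apache.lucene.codecs.PostingsFormat;
    import org.apache.lucene.util.TestUtil;

    public class AlwaysFormatSketch {
      public static void main(String[] args) {
        // Every field, regardless of name, will use the Lucene41 postings format.
        Codec codec = TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41"));
        System.out.println("wrapped codec: " + codec);
      }
    }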
@@ -20,13 +20,13 @@ package org.apache.lucene.codecs.blockterms;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapFixedInterval;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Basic tests of a PF using VariableGap terms dictionary (fixed interval)
*/
public class TestVarGapDocFreqIntervalPostingsFormat extends BasePostingsFormatTestCase {
private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41VarGapFixedInterval(_TestUtil.nextInt(random(), 1, 1000)));
private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41VarGapFixedInterval(TestUtil.nextInt(random(), 1, 1000)));
@Override
protected Codec getCodec() {

@@ -20,13 +20,13 @@ package org.apache.lucene.codecs.blockterms;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapDocFreqInterval;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Basic tests of a PF using VariableGap terms dictionary (fixed interval, docFreq threshold)
*/
public class TestVarGapFixedIntervalPostingsFormat extends BasePostingsFormatTestCase {
private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41VarGapDocFreqInterval(_TestUtil.nextInt(random(), 1, 100), _TestUtil.nextInt(random(), 1, 1000)));
private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41VarGapDocFreqInterval(TestUtil.nextInt(random(), 1, 100), TestUtil.nextInt(random(), 1, 1000)));
@Override
protected Codec getCodec() {

@@ -19,13 +19,13 @@ package org.apache.lucene.codecs.bloom;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Basic tests for BloomPostingsFormat
*/
public class TestBloomPostingsFormat extends BasePostingsFormatTestCase {
private final Codec codec = _TestUtil.alwaysPostingsFormat(new TestBloomFilteredLucene41Postings());
private final Codec codec = TestUtil.alwaysPostingsFormat(new TestBloomFilteredLucene41Postings());
@Override
protected Codec getCodec() {

@@ -19,13 +19,13 @@ package org.apache.lucene.codecs.diskdv;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests DiskDocValuesFormat
*/
public class TestDiskDocValuesFormat extends BaseCompressingDocValuesFormatTestCase {
private final Codec codec = _TestUtil.alwaysDocValuesFormat(new DiskDocValuesFormat());
private final Codec codec = TestUtil.alwaysDocValuesFormat(new DiskDocValuesFormat());
@Override
protected Codec getCodec() {

@@ -20,14 +20,14 @@ package org.apache.lucene.codecs.intblock;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.mockintblock.MockFixedIntBlockPostingsFormat;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Basic tests for FixedIntBlock
*/
public class TestFixedIntBlockPostingsFormat extends BasePostingsFormatTestCase {
// TODO: randomize blocksize
private final Codec codec = _TestUtil.alwaysPostingsFormat(new MockFixedIntBlockPostingsFormat());
private final Codec codec = TestUtil.alwaysPostingsFormat(new MockFixedIntBlockPostingsFormat());
@Override
protected Codec getCodec() {

@@ -20,14 +20,15 @@ package org.apache.lucene.codecs.intblock;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.mockintblock.MockVariableIntBlockPostingsFormat;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Basic tests for VariableIntBlock
*/
public class TestVariableIntBlockPostingsFormat extends BasePostingsFormatTestCase {
// TODO: randomize blocksize
private final Codec codec = _TestUtil.alwaysPostingsFormat( new MockVariableIntBlockPostingsFormat());
private final Codec codec = TestUtil.alwaysPostingsFormat(new MockVariableIntBlockPostingsFormat());
@Override
protected Codec getCodec() {

@@ -19,13 +19,13 @@ package org.apache.lucene.codecs.memory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BaseDocValuesFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests DirectDocValuesFormat
*/
public class TestDirectDocValuesFormat extends BaseDocValuesFormatTestCase {
private final Codec codec = _TestUtil.alwaysDocValuesFormat(new DirectDocValuesFormat());
private final Codec codec = TestUtil.alwaysDocValuesFormat(new DirectDocValuesFormat());
@Override
protected Codec getCodec() {

@@ -19,14 +19,15 @@ package org.apache.lucene.codecs.memory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests DirectPostingsFormat
*/
public class TestDirectPostingsFormat extends BasePostingsFormatTestCase {
// TODO: randomize parameters
private final Codec codec = _TestUtil.alwaysPostingsFormat(new DirectPostingsFormat());
private final Codec codec = TestUtil.alwaysPostingsFormat(new DirectPostingsFormat());
@Override
protected Codec getCodec() {

@@ -19,13 +19,13 @@ package org.apache.lucene.codecs.memory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests MemoryDocValuesFormat
*/
public class TestMemoryDocValuesFormat extends BaseCompressingDocValuesFormatTestCase {
private final Codec codec = _TestUtil.alwaysDocValuesFormat(new MemoryDocValuesFormat());
private final Codec codec = TestUtil.alwaysDocValuesFormat(new MemoryDocValuesFormat());
@Override
protected Codec getCodec() {

@@ -19,14 +19,15 @@ package org.apache.lucene.codecs.memory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests MemoryPostingsFormat
*/
public class TestMemoryPostingsFormat extends BasePostingsFormatTestCase {
// TODO: randomize doPack
private final Codec codec = _TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat());
private final Codec codec = TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat());
@Override
protected Codec getCodec() {
@@ -37,9 +37,8 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Pulses 10k terms/docs,

@@ -51,9 +50,9 @@ import org.apache.lucene.util._TestUtil;
public class Test10KPulsings extends LuceneTestCase {
public void test10kPulsed() throws Exception {
// we always run this test with pulsing codec.
Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1));
Codec cp = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1));
File f = _TestUtil.getTempDir("10kpulsed");
File f = TestUtil.getTempDir("10kpulsed");
BaseDirectoryWrapper dir = newFSDirectory(f);
dir.setCheckIndexOnClose(false); // we do this ourselves explicitly
RandomIndexWriter iw = new RandomIndexWriter(random(), dir,

@@ -62,7 +61,7 @@ public class Test10KPulsings extends LuceneTestCase {
Document document = new Document();
FieldType ft = new FieldType(TextField.TYPE_STORED);
switch(_TestUtil.nextInt(random(), 0, 2)) {
switch(TestUtil.nextInt(random(), 0, 2)) {
case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break;
case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break;
default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break;

@@ -87,13 +86,13 @@ public class Test10KPulsings extends LuceneTestCase {
for (int i = 0; i < 10050; i++) {
String expected = df.format(i);
assertEquals(expected, te.next().utf8ToString());
de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
}
ir.close();
_TestUtil.checkIndex(dir);
TestUtil.checkIndex(dir);
dir.close();
}

@@ -101,10 +100,10 @@ public class Test10KPulsings extends LuceneTestCase {
*/
public void test10kNotPulsed() throws Exception {
// we always run this test with pulsing codec.
int freqCutoff = _TestUtil.nextInt(random(), 1, 10);
Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(freqCutoff));
int freqCutoff = TestUtil.nextInt(random(), 1, 10);
Codec cp = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(freqCutoff));
File f = _TestUtil.getTempDir("10knotpulsed");
File f = TestUtil.getTempDir("10knotpulsed");
BaseDirectoryWrapper dir = newFSDirectory(f);
dir.setCheckIndexOnClose(false); // we do this ourselves explicitly
RandomIndexWriter iw = new RandomIndexWriter(random(), dir,

@@ -113,7 +112,7 @@ public class Test10KPulsings extends LuceneTestCase {
Document document = new Document();
FieldType ft = new FieldType(TextField.TYPE_STORED);
switch(_TestUtil.nextInt(random(), 0, 2)) {
switch(TestUtil.nextInt(random(), 0, 2)) {
case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break;
case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break;
default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break;

@@ -145,13 +144,13 @@ public class Test10KPulsings extends LuceneTestCase {
for (int i = 0; i < 10050; i++) {
String expected = df.format(i);
assertEquals(expected, te.next().utf8ToString());
de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
}
ir.close();
_TestUtil.checkIndex(dir);
TestUtil.checkIndex(dir);
dir.close();
}
}
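Both Test10KPulsings methods above disable the directory's close-time check and instead run TestUtil.checkIndex(dir) explicitly, which fails on corruption and returns a CheckIndex.Status with per-segment statistics. A minimal hedged sketch; the index contents and class name are invented:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.CheckIndex;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.TestUtil;
    import org.apache.lucene.util.Version;

    public class CheckIndexSketch {
      public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        // null analyzer is fine here: the document has no tokenized fields.
        IndexWriter iw = new IndexWriter(dir,
            new IndexWriterConfig(Version.LUCENE_CURRENT, null));
        iw.addDocument(new Document());
        iw.close();

        // Fails loudly on corruption; Status exposes segment-level details,
        // e.g. Test2BTerms later reads status.segmentInfos.get(0).termIndexStatus.
        CheckIndex.Status status = TestUtil.checkIndex(dir);
        System.out.println("segments checked: " + status.segmentInfos.size());
        dir.close();
      }
    }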
@@ -19,14 +19,15 @@ package org.apache.lucene.codecs.pulsing;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests PulsingPostingsFormat
*/
public class TestPulsingPostingsFormat extends BasePostingsFormatTestCase {
// TODO: randomize cutoff
private final Codec codec = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat());
private final Codec codec = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat());
@Override
protected Codec getCodec() {

@@ -27,7 +27,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;

@@ -36,7 +35,7 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests that pulsing codec reuses its enums and wrapped enums

@@ -45,7 +44,7 @@ public class TestPulsingReuse extends LuceneTestCase {
// TODO: this is a basic test. this thing is complicated, add more
public void testSophisticatedReuse() throws Exception {
// we always run this test with pulsing codec.
Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1));
Codec cp = TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1));
Directory dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));

@@ -83,7 +82,7 @@ public class TestPulsingReuse extends LuceneTestCase {
/** tests reuse with Pulsing1(Pulsing2(Standard)) */
public void testNestedPulsing() throws Exception {
// we always run this test with pulsing codec.
Codec cp = _TestUtil.alwaysPostingsFormat(new NestedPulsingPostingsFormat());
Codec cp = TestUtil.alwaysPostingsFormat(new NestedPulsingPostingsFormat());
BaseDirectoryWrapper dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));
@@ -20,14 +20,14 @@ package org.apache.lucene.codecs.sep;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.mocksep.MockSepPostingsFormat;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests sep layout
*/
public class TestSepPostingsFormat extends BasePostingsFormatTestCase {
// TODO: randomize cutoff
private final Codec codec = _TestUtil.alwaysPostingsFormat(new MockSepPostingsFormat());
private final Codec codec = TestUtil.alwaysPostingsFormat(new MockSepPostingsFormat());
@Override
protected Codec getCodec() {

@@ -33,7 +33,7 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.AutomatonTestUtil;
import org.apache.lucene.util.automaton.BasicAutomata;

@@ -232,7 +232,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
for (int i = 0; i < iters; i++) {
final CharacterRunAutomaton dfa = new CharacterRunAutomaton(AutomatonTestUtil.randomAutomaton(random()));
final boolean lowercase = random().nextBoolean();
final int limit = _TestUtil.nextInt(random(), 0, 500);
final int limit = TestUtil.nextInt(random(), 0, 500);
Analyzer a = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {

@@ -248,7 +248,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
public void testForwardOffsets() throws Exception {
int num = atLeast(10000);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomHtmlishString(random(), 20);
String s = TestUtil.randomHtmlishString(random(), 20);
StringReader reader = new StringReader(s);
MockCharFilter charfilter = new MockCharFilter(reader, 2);
MockAnalyzer analyzer = new MockAnalyzer(random());
@@ -22,7 +22,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import java.io.StringReader;
import java.util.HashMap;

@@ -246,17 +246,17 @@ public class TestToken extends LuceneTestCase {
public void testAttributeReflection() throws Exception {
Token t = new Token("foobar", 6, 22, 8);
_TestUtil.assertAttributeReflection(t,
new HashMap<String,Object>() {{
put(CharTermAttribute.class.getName() + "#term", "foobar");
put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar"));
put(OffsetAttribute.class.getName() + "#startOffset", 6);
put(OffsetAttribute.class.getName() + "#endOffset", 22);
put(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1);
put(PayloadAttribute.class.getName() + "#payload", null);
put(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE);
put(FlagsAttribute.class.getName() + "#flags", 8);
}});
TestUtil.assertAttributeReflection(t,
new HashMap<String, Object>() {{
put(CharTermAttribute.class.getName() + "#term", "foobar");
put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar"));
put(OffsetAttribute.class.getName() + "#startOffset", 6);
put(OffsetAttribute.class.getName() + "#endOffset", 22);
put(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1);
put(PayloadAttribute.class.getName() + "#payload", null);
put(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE);
put(FlagsAttribute.class.getName() + "#flags", 8);
}});
}
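The tokenattributes hunks all funnel through TestUtil.assertAttributeReflection, which reflects an AttributeImpl's properties and asserts that the reflected key/value pairs equal the supplied map. A hedged sketch reusing one of the exact assertions from the patch; only the test class name is invented:

    import java.util.Collections;

    import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
    import org.apache.lucene.analysis.tokenattributes.FlagsAttributeImpl;
    import org.apache.lucene.util.LuceneTestCase;
    import org.apache.lucene.util.TestUtil;

    public class AttributeReflectionSketch extends LuceneTestCase {
      public void testFlagsDefaultIsZero() {
        // Identical in shape to the TestSimpleAttributeImpl assertion below:
        // a fresh FlagsAttributeImpl must reflect a single "#flags" entry of 0.
        TestUtil.assertAttributeReflection(new FlagsAttributeImpl(),
            Collections.singletonMap(FlagsAttribute.class.getName() + "#flags", 0));
      }
    }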
@@ -20,7 +20,8 @@ package org.apache.lucene.analysis.tokenattributes;
import org.apache.lucene.analysis.TestToken;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import java.nio.CharBuffer;
import java.util.HashMap;
import java.util.Formatter;

@@ -132,7 +133,7 @@ public class TestCharTermAttributeImpl extends LuceneTestCase {
public void testAttributeReflection() throws Exception {
CharTermAttributeImpl t = new CharTermAttributeImpl();
t.append("foobar");
_TestUtil.assertAttributeReflection(t, new HashMap<String,Object>() {{
TestUtil.assertAttributeReflection(t, new HashMap<String, Object>() {{
put(CharTermAttribute.class.getName() + "#term", "foobar");
put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar"));
}});

@@ -17,7 +17,7 @@ package org.apache.lucene.analysis.tokenattributes;
* limitations under the License.
*/
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.LuceneTestCase;
import java.util.Collections;

@@ -27,21 +27,21 @@ public class TestSimpleAttributeImpl extends LuceneTestCase {
// this checks using reflection API if the defaults are correct
public void testAttributes() {
_TestUtil.assertAttributeReflection(new PositionIncrementAttributeImpl(),
Collections.singletonMap(PositionIncrementAttribute.class.getName()+"#positionIncrement", 1));
_TestUtil.assertAttributeReflection(new PositionLengthAttributeImpl(),
Collections.singletonMap(PositionLengthAttribute.class.getName()+"#positionLength", 1));
_TestUtil.assertAttributeReflection(new FlagsAttributeImpl(),
Collections.singletonMap(FlagsAttribute.class.getName()+"#flags", 0));
_TestUtil.assertAttributeReflection(new TypeAttributeImpl(),
Collections.singletonMap(TypeAttribute.class.getName()+"#type", TypeAttribute.DEFAULT_TYPE));
_TestUtil.assertAttributeReflection(new PayloadAttributeImpl(),
Collections.singletonMap(PayloadAttribute.class.getName()+"#payload", null));
_TestUtil.assertAttributeReflection(new KeywordAttributeImpl(),
Collections.singletonMap(KeywordAttribute.class.getName()+"#keyword", false));
_TestUtil.assertAttributeReflection(new OffsetAttributeImpl(), new HashMap<String,Object>() {{
put(OffsetAttribute.class.getName()+"#startOffset", 0);
put(OffsetAttribute.class.getName()+"#endOffset", 0);
TestUtil.assertAttributeReflection(new PositionIncrementAttributeImpl(),
Collections.singletonMap(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1));
TestUtil.assertAttributeReflection(new PositionLengthAttributeImpl(),
Collections.singletonMap(PositionLengthAttribute.class.getName() + "#positionLength", 1));
TestUtil.assertAttributeReflection(new FlagsAttributeImpl(),
Collections.singletonMap(FlagsAttribute.class.getName() + "#flags", 0));
TestUtil.assertAttributeReflection(new TypeAttributeImpl(),
Collections.singletonMap(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE));
TestUtil.assertAttributeReflection(new PayloadAttributeImpl(),
Collections.singletonMap(PayloadAttribute.class.getName() + "#payload", null));
TestUtil.assertAttributeReflection(new KeywordAttributeImpl(),
Collections.singletonMap(KeywordAttribute.class.getName() + "#keyword", false));
TestUtil.assertAttributeReflection(new OffsetAttributeImpl(), new HashMap<String, Object>() {{
put(OffsetAttribute.class.getName() + "#startOffset", 0);
put(OffsetAttribute.class.getName() + "#endOffset", 0);
}});
}

@@ -24,7 +24,7 @@ import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import com.carrotsearch.randomizedtesting.generators.RandomInts;

@@ -85,8 +85,8 @@ public abstract class AbstractTestCompressionMode extends LuceneTestCase {
final int iterations = atLeast(10);
for (int i = 0; i < iterations; ++i) {
final byte[] decompressed = randomArray();
final int off = random().nextBoolean() ? 0 : _TestUtil.nextInt(random(), 0, decompressed.length);
final int len = random().nextBoolean() ? decompressed.length - off : _TestUtil.nextInt(random(), 0, decompressed.length - off);
final int off = random().nextBoolean() ? 0 : TestUtil.nextInt(random(), 0, decompressed.length);
final int len = random().nextBoolean() ? decompressed.length - off : TestUtil.nextInt(random(), 0, decompressed.length - off);
final byte[] compressed = compress(decompressed, off, len);
final byte[] restored = decompress(compressed, len);
assertArrayEquals(Arrays.copyOfRange(decompressed, off, off+len), restored);

@@ -138,7 +138,7 @@
}
public void testConstant() throws IOException {
final byte[] decompressed = new byte[_TestUtil.nextInt(random(), 1, 10000)];
final byte[] decompressed = new byte[TestUtil.nextInt(random(), 1, 10000)];
Arrays.fill(decompressed, (byte) random().nextInt());
test(decompressed);
}
@@ -23,7 +23,8 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* <code>TestBitVector</code> tests the <code>BitVector</code>, obviously.

@@ -231,10 +232,10 @@ public class TestBitVector extends LuceneTestCase
public void testClearedBitNearEnd() throws IOException {
Directory d = newDirectory();
final int numBits = _TestUtil.nextInt(random(), 7, 1000);
final int numBits = TestUtil.nextInt(random(), 7, 1000);
BitVector bv = new BitVector(numBits);
bv.invertAll();
bv.clear(numBits-_TestUtil.nextInt(random(), 1, 7));
bv.clear(numBits- TestUtil.nextInt(random(), 1, 7));
bv.write(d, "test", newIOContext(random()));
assertEquals(numBits-1, bv.count());
d.close();

@@ -242,7 +243,7 @@ public class TestBitVector extends LuceneTestCase
public void testMostlySet() throws IOException {
Directory d = newDirectory();
final int numBits = _TestUtil.nextInt(random(), 30, 1000);
final int numBits = TestUtil.nextInt(random(), 30, 1000);
for(int numClear=0;numClear<20;numClear++) {
BitVector bv = new BitVector(numBits);
bv.invertAll();

@@ -33,7 +33,7 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.BeforeClass;

public class TestLucene40PostingsReader extends LuceneTestCase {

@@ -53,7 +53,7 @@ public class TestLucene40PostingsReader extends LuceneTestCase {
* depends heavily on term vectors cross-check at checkIndex
*/
public void testPostings() throws Exception {
Directory dir = newFSDirectory(_TestUtil.getTempDir("postings"));
Directory dir = newFSDirectory(TestUtil.getTempDir("postings"));
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
iwc.setCodec(Codec.forName("Lucene40"));
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);

@@ -116,7 +116,7 @@ public class TestLucene40PostingsReader extends LuceneTestCase {
StringBuilder sb = new StringBuilder();
int i = random().nextInt(terms.length);
while (i < terms.length) {
int tf = _TestUtil.nextInt(random(), 1, maxTF);
int tf = TestUtil.nextInt(random(), 1, maxTF);
for (int j = 0; j < tf; j++) {
shuffled.add(terms[i]);
}
@@ -36,7 +36,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.BeforeClass;

// TODO: really this should be in BaseTestPF or somewhere else? useful test!

@@ -49,7 +49,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
public void testReuseDocsEnumNoReuse() throws IOException {
Directory dir = newDirectory();
Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));
int numdocs = atLeast(20);

@@ -76,7 +76,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
// tests for reuse only if bits are the same either null or the same instance
public void testReuseDocsEnumSameBitsOrNull() throws IOException {
Directory dir = newDirectory();
Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));
int numdocs = atLeast(20);

@@ -120,7 +120,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
// make sure we never reuse from another reader even if it is the same field & codec etc
public void testReuseDocsEnumDifferentReader() throws IOException {
Directory dir = newDirectory();
Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));
int numdocs = atLeast(20);

@@ -19,13 +19,13 @@ package org.apache.lucene.codecs.lucene41;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests BlockPostingsFormat
*/
public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
private final Codec codec = _TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat());
private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat());
@Override
protected Codec getCodec() {

@@ -30,7 +30,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests special cases of BlockPostingsFormat

@@ -43,9 +43,9 @@ public class TestBlockPostingsFormat2 extends LuceneTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
dir = newFSDirectory(_TestUtil.getTempDir("testDFBlockSize"));
dir = newFSDirectory(TestUtil.getTempDir("testDFBlockSize"));
iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
iw = new RandomIndexWriter(random(), dir, iwc.clone());
iw.setDoRandomForceMerge(false); // we will ourselves
}

@@ -53,7 +53,7 @@ public class TestBlockPostingsFormat2 extends LuceneTestCase {
@Override
public void tearDown() throws Exception {
iw.close();
_TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
iwc.setOpenMode(OpenMode.APPEND);
IndexWriter iw = new IndexWriter(dir, iwc.clone());
iw.forceMerge(1);
@@ -17,7 +17,6 @@ package org.apache.lucene.codecs.lucene41;
* limitations under the License.
*/
import java.io.Reader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;

@@ -53,7 +52,8 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.English;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.automaton.AutomatonTestUtil;
import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.RegExp;

@@ -83,7 +83,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
}
};
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
// TODO we could actually add more fields implemented with different PFs
// or, just put this test into the usual rotation?
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc.clone());

@@ -123,7 +123,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
doc.add(field7);
doc.add(field8);
for (int i = 0; i < MAXDOC; i++) {
String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ') + " " + _TestUtil.randomSimpleString(random());
String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ') + " " + TestUtil.randomSimpleString(random());
field1.setStringValue(stringValue);
field2.setStringValue(stringValue);
field3.setStringValue(stringValue);

@@ -136,7 +136,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
}
iw.close();
verify(dir);
_TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
iwc.setOpenMode(OpenMode.APPEND);
IndexWriter iw2 = new IndexWriter(dir, iwc.clone());
iw2.forceMerge(1);

@@ -19,13 +19,13 @@ package org.apache.lucene.codecs.lucene45;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Tests Lucene45DocValuesFormat
*/
public class TestLucene45DocValuesFormat extends BaseCompressingDocValuesFormatTestCase {
private final Codec codec = _TestUtil.alwaysDocValuesFormat(new Lucene45DocValuesFormat());
private final Codec codec = TestUtil.alwaysDocValuesFormat(new Lucene45DocValuesFormat());
@Override
protected Codec getCodec() {
@@ -46,7 +46,8 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;

/**
* Basic tests of PerFieldDocValuesFormat

@@ -67,7 +68,7 @@ public class TestPerFieldDocValuesFormat extends BaseDocValuesFormatTestCase {
@Override
protected boolean codecAcceptsHugeBinaryValues(String field) {
return _TestUtil.fieldSupportsHugeBinaryDocValues(field);
return TestUtil.fieldSupportsHugeBinaryDocValues(field);
}
// just a simple trivial test

@@ -43,7 +43,8 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.Test;

/**

@@ -107,7 +108,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase {
addDocs2(writer, 10);
writer.commit();
assertEquals(30, writer.maxDoc());
_TestUtil.checkIndex(dir);
TestUtil.checkIndex(dir);
writer.forceMerge(1);
assertEquals(30, writer.maxDoc());
writer.close();

@@ -240,7 +241,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase {
final int docsPerRound = 97;
int numRounds = atLeast(1);
for (int i = 0; i < numRounds; i++) {
int num = _TestUtil.nextInt(random(), 30, 60);
int num = TestUtil.nextInt(random(), 30, 60);
IndexWriterConfig config = newIndexWriterConfig(random(),
TEST_VERSION_CURRENT, new MockAnalyzer(random()));
config.setOpenMode(OpenMode.CREATE_OR_APPEND);

@@ -251,7 +252,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase {
FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
customType.setTokenized(random().nextBoolean());
customType.setOmitNorms(random().nextBoolean());
Field field = newField("" + k, _TestUtil
Field field = newField("" + k, TestUtil
.randomRealisticUnicodeString(random(), 128), customType);
doc.add(field);
}
@@ -26,8 +26,8 @@ import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TimeUnits;
import org.apache.lucene.util._TestUtil;
import org.junit.Ignore;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

@@ -38,7 +38,7 @@ public class Test2BBinaryDocValues extends LuceneTestCase {
// indexes Integer.MAX_VALUE docs with a fixed binary field
public void testFixedBinary() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BFixedBinary"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BFixedBinary"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -98,7 +98,7 @@ public class Test2BBinaryDocValues extends LuceneTestCase {
// indexes Integer.MAX_VALUE docs with a variable binary field
public void testVariableBinary() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BVariableBinary"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BVariableBinary"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -22,7 +22,7 @@ import java.util.Arrays;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

@@ -31,7 +31,7 @@ public class Test2BDocs extends LuceneTestCase {
@BeforeClass
public static void beforeClass() throws Exception {
dir = newFSDirectory(_TestUtil.getTempDir("2Bdocs"));
dir = newFSDirectory(TestUtil.getTempDir("2Bdocs"));
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
Document doc = new Document();
for (int i = 0; i < 262144; i++) {

@@ -61,7 +61,7 @@ public class Test2BDocs extends LuceneTestCase {
}
public void testExactlyAtLimit() throws Exception {
Directory dir2 = newFSDirectory(_TestUtil.getTempDir("2BDocs2"));
Directory dir2 = newFSDirectory(TestUtil.getTempDir("2BDocs2"));
IndexWriter iw = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
Document doc = new Document();
for (int i = 0; i < 262143; i++) {
@@ -23,8 +23,8 @@ import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TimeUnits;
import org.apache.lucene.util._TestUtil;
import org.junit.Ignore;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

@@ -35,7 +35,7 @@ public class Test2BNumericDocValues extends LuceneTestCase {
// indexes Integer.MAX_VALUE docs with an increasing dv field
public void testNumerics() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BNumerics"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BNumerics"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -28,8 +28,9 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TimeUnits;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.junit.Ignore;

@@ -46,7 +47,7 @@ public class Test2BPositions extends LuceneTestCase {
// uses lots of space and takes a few minutes
@Ignore("Very slow. Enable manually by removing @Ignore.")
public void test() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPositions"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BPositions"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -29,8 +29,9 @@ import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TimeUnits;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.TestUtil;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

@@ -44,7 +45,7 @@ public class Test2BPostings extends LuceneTestCase {
@Nightly
public void test() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPostings"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BPostings"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -30,8 +30,8 @@ import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TimeUnits;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.junit.Ignore;

@@ -50,7 +50,7 @@ public class Test2BPostingsBytes extends LuceneTestCase {
// with some codecs needs more heap space as well.
@Ignore("Very slow. Enable manually by removing @Ignore.")
public void test() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPostingsBytes1"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BPostingsBytes1"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -93,7 +93,7 @@ public class Test2BPostingsBytes extends LuceneTestCase {
IndexReader subReaders[] = new IndexReader[1000];
Arrays.fill(subReaders, oneThousand);
MultiReader mr = new MultiReader(subReaders);
BaseDirectoryWrapper dir2 = newFSDirectory(_TestUtil.getTempDir("2BPostingsBytes2"));
BaseDirectoryWrapper dir2 = newFSDirectory(TestUtil.getTempDir("2BPostingsBytes2"));
if (dir2 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir2).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -108,7 +108,7 @@ public class Test2BPostingsBytes extends LuceneTestCase {
subReaders = new IndexReader[2000];
Arrays.fill(subReaders, oneMillion);
mr = new MultiReader(subReaders);
BaseDirectoryWrapper dir3 = newFSDirectory(_TestUtil.getTempDir("2BPostingsBytes3"));
BaseDirectoryWrapper dir3 = newFSDirectory(TestUtil.getTempDir("2BPostingsBytes3"));
if (dir3 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir3).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -26,8 +26,8 @@ import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TimeUnits;
import org.apache.lucene.util._TestUtil;
import org.junit.Ignore;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

@@ -38,7 +38,7 @@ public class Test2BSortedDocValues extends LuceneTestCase {
// indexes Integer.MAX_VALUE docs with a fixed binary field
public void testFixedSorted() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BFixedSorted"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BFixedSorted"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -95,7 +95,7 @@ public class Test2BSortedDocValues extends LuceneTestCase {
// indexes Integer.MAX_VALUE docs with a fixed binary field
// TODO: must use random.nextBytes (like Test2BTerms) to avoid BytesRefHash probing issues
public void test2BOrds() throws Exception {
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BOrds"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BOrds"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

@@ -62,7 +62,7 @@ public class Test2BTerms extends LuceneTestCase {
addAttribute(TermToBytesRefAttribute.class);
bytes.length = TOKEN_LEN;
this.random = random;
nextSave = _TestUtil.nextInt(random, 500000, 1000000);
nextSave = TestUtil.nextInt(random, 500000, 1000000);
}
@Override

@@ -75,7 +75,7 @@ public class Test2BTerms extends LuceneTestCase {
if (--nextSave == 0) {
savedTerms.add(BytesRef.deepCopyOf(bytes));
System.out.println("TEST: save term=" + bytes);
nextSave = _TestUtil.nextInt(random, 500000, 1000000);
nextSave = TestUtil.nextInt(random, 500000, 1000000);
}
return true;
}

@@ -144,11 +144,11 @@ public class Test2BTerms extends LuceneTestCase {
System.out.println("Starting Test2B");
final long TERM_COUNT = ((long) Integer.MAX_VALUE) + 100000000;
final int TERMS_PER_DOC = _TestUtil.nextInt(random(), 100000, 1000000);
final int TERMS_PER_DOC = TestUtil.nextInt(random(), 100000, 1000000);
List<BytesRef> savedTerms = null;
BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BTerms"));
BaseDirectoryWrapper dir = newFSDirectory(TestUtil.getTempDir("2BTerms"));
//MockDirectoryWrapper dir = newFSDirectory(new File("/p/lucene/indices/2bindex"));
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);

@@ -212,7 +212,7 @@ public class Test2BTerms extends LuceneTestCase {
r.close();
System.out.println("TEST: now CheckIndex...");
CheckIndex.Status status = _TestUtil.checkIndex(dir);
CheckIndex.Status status = TestUtil.checkIndex(dir);
final long tc = status.segmentInfos.get(0).termIndexStatus.termCount;
assertTrue("count " + tc + " is not > " + Integer.MAX_VALUE, tc > Integer.MAX_VALUE);

@@ -224,13 +224,13 @@ public class Test2BTerms extends LuceneTestCase {
System.out.println("TEST: findTerms");
final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null);
final List<BytesRef> savedTerms = new ArrayList<BytesRef>();
int nextSave = _TestUtil.nextInt(random(), 500000, 1000000);
int nextSave = TestUtil.nextInt(random(), 500000, 1000000);
BytesRef term;
while((term = termsEnum.next()) != null) {
if (--nextSave == 0) {
savedTerms.add(BytesRef.deepCopyOf(term));
System.out.println("TEST: add " + term);
nextSave = _TestUtil.nextInt(random(), 500000, 1000000);
nextSave = TestUtil.nextInt(random(), 500000, 1000000);
}
}
return savedTerms;
@@ -25,8 +25,9 @@ import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.TimeUnits;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;

 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
@@ -41,7 +42,7 @@ public class Test4GBStoredFields extends LuceneTestCase {

 @Nightly
 public void test() throws Exception {
-MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new MMapDirectory(_TestUtil.getTempDir("4GBStoredFields")));
+MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new MMapDirectory(TestUtil.getTempDir("4GBStoredFields")));
 dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);

 IndexWriter w = new IndexWriter(dir,

@@ -46,7 +46,7 @@ import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestAddIndexes extends LuceneTestCase {

@@ -66,7 +66,7 @@ public class TestAddIndexes extends LuceneTestCase {
 addDocs(writer, 100);
 assertEquals(100, writer.maxDoc());
 writer.close();
-_TestUtil.checkIndex(dir);
+TestUtil.checkIndex(dir);

 writer = newWriter(
 aux,
@@ -91,7 +91,7 @@ public class TestAddIndexes extends LuceneTestCase {
 writer.addIndexes(aux, aux2);
 assertEquals(190, writer.maxDoc());
 writer.close();
-_TestUtil.checkIndex(dir);
+TestUtil.checkIndex(dir);

 // make sure the old index is correct
 verifyNumDocs(aux, 40);
@@ -540,7 +540,7 @@ public class TestAddIndexes extends LuceneTestCase {
 private void verifyTermDocs(Directory dir, Term term, int numDocs)
 throws IOException {
 IndexReader reader = DirectoryReader.open(dir);
-DocsEnum docsEnum = _TestUtil.docs(random(), reader, term.field, term.bytes, null, null, DocsEnum.FLAG_NONE);
+DocsEnum docsEnum = TestUtil.docs(random(), reader, term.field, term.bytes, null, null, DocsEnum.FLAG_NONE);
 int count = 0;
 while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
 count++;
@@ -915,7 +915,7 @@ public class TestAddIndexes extends LuceneTestCase {
 CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
 c.launchThreads(-1);

-Thread.sleep(_TestUtil.nextInt(random(), 10, 500));
+Thread.sleep(TestUtil.nextInt(random(), 10, 500));

 // Close w/o first stopping/joining the threads
 if (VERBOSE) {
@@ -940,7 +940,7 @@ public class TestAddIndexes extends LuceneTestCase {
 CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
 c.launchThreads(-1);

-Thread.sleep(_TestUtil.nextInt(random(), 10, 500));
+Thread.sleep(TestUtil.nextInt(random(), 10, 500));

 // Close w/o first stopping/joining the threads
 if (VERBOSE) {
@@ -1016,7 +1016,7 @@ public class TestAddIndexes extends LuceneTestCase {
 assertEquals(100, writer.maxDoc());
 writer.commit();
 writer.close();
-_TestUtil.checkIndex(dir);
+TestUtil.checkIndex(dir);

 writer = newWriter(
 aux,
@@ -1141,7 +1141,7 @@ public class TestAddIndexes extends LuceneTestCase {
 Directory dir = newDirectory();
 IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
 new MockAnalyzer(random()));
-conf.setCodec(_TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1 + random().nextInt(20))));
+conf.setCodec(TestUtil.alwaysPostingsFormat(new Pulsing41PostingsFormat(1 + random().nextInt(20))));
 IndexWriter w = new IndexWriter(dir, conf);
 try {
 w.addIndexes(toAdd);

@@ -29,7 +29,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 /**
  * Test that a plain default puts codec headers in all files.
@@ -49,7 +49,7 @@ public class TestAllFilesHaveCodecHeader extends LuceneTestCase {
 doc.add(bodyField);
 for (int i = 0; i < 100; i++) {
 idField.setStringValue(Integer.toString(i));
-bodyField.setStringValue(_TestUtil.randomUnicodeString(random()));
+bodyField.setStringValue(TestUtil.randomUnicodeString(random()));
 riw.addDocument(doc);
 if (random().nextInt(7) == 0) {
 riw.commit();

@@ -17,7 +17,6 @@ package org.apache.lucene.index;
  */

 import java.io.File;
-import java.io.IOException;

 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.*;
@@ -176,10 +175,10 @@ public class TestAtomicUpdate extends LuceneTestCase {
 directory.close();

 // Second in an FSDirectory:
-File dirPath = _TestUtil.getTempDir("lucene.test.atomic");
+File dirPath = TestUtil.getTempDir("lucene.test.atomic");
 directory = newFSDirectory(dirPath);
 runTest(directory);
 directory.close();
-_TestUtil.rmDir(dirPath);
+TestUtil.rmDir(dirPath);
 }
 }

@@ -63,7 +63,7 @@ import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

@@ -205,7 +205,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
  */
 private static IndexUpgrader newIndexUpgrader(Directory dir) {
 final boolean streamType = random().nextBoolean();
-final int choice = _TestUtil.nextInt(random(), 0, 2);
+final int choice = TestUtil.nextInt(random(), 0, 2);
 switch (choice) {
 case 0: return new IndexUpgrader(dir, TEST_VERSION_CURRENT);
 case 1: return new IndexUpgrader(dir, TEST_VERSION_CURRENT,
@@ -224,9 +224,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 names.addAll(Arrays.asList(oldSingleSegmentNames));
 oldIndexDirs = new HashMap<String,Directory>();
 for (String name : names) {
-File dir = _TestUtil.getTempDir(name);
+File dir = TestUtil.getTempDir(name);
 File dataFile = new File(TestBackwardsCompatibility.class.getResource("index." + name + ".zip").toURI());
-_TestUtil.unzip(dataFile, dir);
+TestUtil.unzip(dataFile, dir);
 oldIndexDirs.put(name, newFSDirectory(dir));
 }
 }
@@ -245,8 +245,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 if (VERBOSE) {
 System.out.println("TEST: index " + unsupportedNames[i]);
 }
-File oldIndxeDir = _TestUtil.getTempDir(unsupportedNames[i]);
-_TestUtil.unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), oldIndxeDir);
+File oldIndxeDir = TestUtil.getTempDir(unsupportedNames[i]);
+TestUtil.unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), oldIndxeDir);
 BaseDirectoryWrapper dir = newFSDirectory(oldIndxeDir);
 // don't checkindex, these are intentionally not supported
 dir.setCheckIndexOnClose(false);
@@ -295,7 +295,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 assertTrue(bos.toString("UTF-8").contains(IndexFormatTooOldException.class.getName()));

 dir.close();
-_TestUtil.rmDir(oldIndxeDir);
+TestUtil.rmDir(oldIndxeDir);
 }
 }

@@ -388,7 +388,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 IndexReader reader = DirectoryReader.open(dir);
 IndexSearcher searcher = newSearcher(reader);

-_TestUtil.checkIndex(dir);
+TestUtil.checkIndex(dir);

 // true if this is a 4.0+ index
 final boolean is40Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("content5") != null;
@@ -594,7 +594,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 public File createIndex(String dirName, boolean doCFS, boolean fullyMerged) throws IOException {
 // we use a real directory name that is not cleaned up, because this method is only used to create backwards indexes:
 File indexDir = new File("/tmp/idx", dirName);
-_TestUtil.rmDir(indexDir);
+TestUtil.rmDir(indexDir);
 Directory dir = newFSDirectory(indexDir);
 LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
 mp.setNoCFSRatio(doCFS ? 1.0 : 0.0);
@@ -642,8 +642,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 public void testExactFileNames() throws IOException {

 String outputDirName = "lucene.backwardscompat0.index";
-File outputDir = _TestUtil.getTempDir(outputDirName);
-_TestUtil.rmDir(outputDir);
+File outputDir = TestUtil.getTempDir(outputDirName);
+TestUtil.rmDir(outputDir);

 try {
 Directory dir = newFSDirectory(outputDir);
@@ -701,7 +701,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 }
 dir.close();
 } finally {
-_TestUtil.rmDir(outputDir);
+TestUtil.rmDir(outputDir);
 }
 }

@@ -811,7 +811,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 // should be found exactly
 assertEquals(TermsEnum.SeekStatus.FOUND,
 terms.seekCeil(aaaTerm));
-assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
 assertNull(terms.next());

 // should hit end of field
@@ -823,12 +823,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 assertEquals(TermsEnum.SeekStatus.NOT_FOUND,
 terms.seekCeil(new BytesRef("a")));
 assertTrue(terms.term().bytesEquals(aaaTerm));
-assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
 assertNull(terms.next());

 assertEquals(TermsEnum.SeekStatus.FOUND,
 terms.seekCeil(aaaTerm));
-assertEquals(35, countDocs(_TestUtil.docs(random(), terms,null, null, DocsEnum.FLAG_NONE)));
+assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
 assertNull(terms.next());

 r.close();
@@ -952,9 +952,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 public void testCommandLineArgs() throws Exception {

 for (String name : oldIndexDirs.keySet()) {
-File dir = _TestUtil.getTempDir(name);
+File dir = TestUtil.getTempDir(name);
 File dataFile = new File(TestBackwardsCompatibility.class.getResource("index." + name + ".zip").toURI());
-_TestUtil.unzip(dataFile, dir);
+TestUtil.unzip(dataFile, dir);

 String path = dir.getAbsolutePath();

@@ -1045,11 +1045,11 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 public static final String moreTermsIndex = "moreterms.40.zip";

 public void testMoreTerms() throws Exception {
-File oldIndexDir = _TestUtil.getTempDir("moreterms");
-_TestUtil.unzip(getDataFile(moreTermsIndex), oldIndexDir);
+File oldIndexDir = TestUtil.getTempDir("moreterms");
+TestUtil.unzip(getDataFile(moreTermsIndex), oldIndexDir);
 Directory dir = newFSDirectory(oldIndexDir);
 // TODO: more tests
-_TestUtil.checkIndex(dir);
+TestUtil.checkIndex(dir);
 dir.close();
 }
 }

@@ -33,7 +33,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 /**
  * Simple test that adds numeric terms, where each term has the
@@ -45,8 +45,8 @@ public class TestBagOfPositions extends LuceneTestCase {
 public void test() throws Exception {
 List<String> postingsList = new ArrayList<String>();
 int numTerms = atLeast(300);
-final int maxTermsPerDoc = _TestUtil.nextInt(random(), 10, 20);
-boolean isSimpleText = "SimpleText".equals(_TestUtil.getPostingsFormat("field"));
+final int maxTermsPerDoc = TestUtil.nextInt(random(), 10, 20);
+boolean isSimpleText = "SimpleText".equals(TestUtil.getPostingsFormat("field"));

 IndexWriterConfig iwc = newIndexWriterConfig(random(), TEST_VERSION_CURRENT, new MockAnalyzer(random()));

@@ -68,11 +68,11 @@ public class TestBagOfPositions extends LuceneTestCase {

 final ConcurrentLinkedQueue<String> postings = new ConcurrentLinkedQueue<String>(postingsList);

-Directory dir = newFSDirectory(_TestUtil.getTempDir("bagofpositions"));
+Directory dir = newFSDirectory(TestUtil.getTempDir("bagofpositions"));

 final RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);

-int threadCount = _TestUtil.nextInt(random(), 1, 5);
+int threadCount = TestUtil.nextInt(random(), 1, 5);
 if (VERBOSE) {
 System.out.println("config: " + iw.w.getConfig());
 System.out.println("threadCount=" + threadCount);
@@ -87,7 +87,7 @@ public class TestBagOfPositions extends LuceneTestCase {
 if (options == 0) {
 fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS); // we dont actually need positions
 fieldType.setStoreTermVectors(true); // but enforce term vectors when we do this so we check SOMETHING
-} else if (options == 1 && !doesntSupportOffsets.contains(_TestUtil.getPostingsFormat("field"))) {
+} else if (options == 1 && !doesntSupportOffsets.contains(TestUtil.getPostingsFormat("field"))) {
 fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
 }
 // else just positions

@@ -32,7 +32,8 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;

 /**
  * Simple test that adds numeric terms, where each term has the
@@ -43,9 +44,9 @@ public class TestBagOfPostings extends LuceneTestCase {
 public void test() throws Exception {
 List<String> postingsList = new ArrayList<String>();
 int numTerms = atLeast(300);
-final int maxTermsPerDoc = _TestUtil.nextInt(random(), 10, 20);
+final int maxTermsPerDoc = TestUtil.nextInt(random(), 10, 20);

-boolean isSimpleText = "SimpleText".equals(_TestUtil.getPostingsFormat("field"));
+boolean isSimpleText = "SimpleText".equals(TestUtil.getPostingsFormat("field"));

 IndexWriterConfig iwc = newIndexWriterConfig(random(), TEST_VERSION_CURRENT, new MockAnalyzer(random()));

@@ -69,10 +70,10 @@ public class TestBagOfPostings extends LuceneTestCase {

 final ConcurrentLinkedQueue<String> postings = new ConcurrentLinkedQueue<String>(postingsList);

-Directory dir = newFSDirectory(_TestUtil.getTempDir("bagofpostings"));
+Directory dir = newFSDirectory(TestUtil.getTempDir("bagofpostings"));
 final RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);

-int threadCount = _TestUtil.nextInt(random(), 1, 5);
+int threadCount = TestUtil.nextInt(random(), 1, 5);
 if (VERBOSE) {
 System.out.println("config: " + iw.w.getConfig());
 System.out.println("threadCount=" + threadCount);

@@ -23,7 +23,8 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestCodecHoldsOpenFiles extends LuceneTestCase {
 public void test() throws Exception {
@@ -49,7 +50,7 @@ public class TestCodecHoldsOpenFiles extends LuceneTestCase {
 }

 for(AtomicReaderContext cxt : r.leaves()) {
-_TestUtil.checkReader(cxt.reader());
+TestUtil.checkReader(cxt.reader());
 }

 r.close();

@@ -49,7 +49,7 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.BeforeClass;

 // TODO: test multiple codecs here?
@@ -187,7 +187,7 @@ public class TestCodecs extends LuceneTestCase {
 // Make term text
 String text2;
 while(true) {
-text2 = _TestUtil.randomUnicodeString(random());
+text2 = TestUtil.randomUnicodeString(random());
 if (!termsSeen.contains(text2) && !text2.endsWith(".")) {
 termsSeen.add(text2);
 break;
@@ -205,7 +205,7 @@ public class TestCodecs extends LuceneTestCase {

 int docID = 0;
 for(int j=0;j<docFreq;j++) {
-docID += _TestUtil.nextInt(random(), 1, 10);
+docID += TestUtil.nextInt(random(), 1, 10);
 docs[j] = docID;

 if (!omitTF) {
@@ -213,7 +213,7 @@ public class TestCodecs extends LuceneTestCase {
 positions[j] = new PositionData[termFreq];
 int position = 0;
 for(int k=0;k<termFreq;k++) {
-position += _TestUtil.nextInt(random(), 1, 10);
+position += TestUtil.nextInt(random(), 1, 10);

 final BytesRef payload;
 if (storePayloads && random().nextInt(4) == 0) {
@@ -276,7 +276,7 @@ public class TestCodecs extends LuceneTestCase {
 // make sure it properly fully resets (rewinds) its
 // internal state:
 for(int iter=0;iter<2;iter++) {
-docsEnum = _TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE);
+docsEnum = TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE);
 assertEquals(terms[i].docs[0], docsEnum.nextDoc());
 assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
 }
@@ -341,7 +341,7 @@ public class TestCodecs extends LuceneTestCase {
 final IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
 new MockAnalyzer(random()));
 config.setMergePolicy(newLogMergePolicy());
-config.setCodec(_TestUtil.alwaysPostingsFormat(new MockSepPostingsFormat()));
+config.setCodec(TestUtil.alwaysPostingsFormat(new MockSepPostingsFormat()));
 final IndexWriter writer = new IndexWriter(dir, config);

 try {
@@ -472,7 +472,7 @@ public class TestCodecs extends LuceneTestCase {
 assertEquals(status, TermsEnum.SeekStatus.FOUND);
 assertEquals(term.docs.length, termsEnum.docFreq());
 if (field.omitTF) {
-this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE), false);
+this.verifyDocs(term.docs, term.positions, TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE), false);
 } else {
 this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null), true);
 }
@@ -492,7 +492,7 @@ public class TestCodecs extends LuceneTestCase {
 assertTrue(termsEnum.term().bytesEquals(new BytesRef(term.text2)));
 assertEquals(term.docs.length, termsEnum.docFreq());
 if (field.omitTF) {
-this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE), false);
+this.verifyDocs(term.docs, term.positions, TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE), false);
 } else {
 this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null), true);
 }
@@ -503,7 +503,7 @@ public class TestCodecs extends LuceneTestCase {
 System.out.println("TEST: seek non-exist terms");
 }
 for(int i=0;i<100;i++) {
-final String text2 = _TestUtil.randomUnicodeString(random()) + ".";
+final String text2 = TestUtil.randomUnicodeString(random()) + ".";
 status = termsEnum.seekCeil(new BytesRef(text2));
 assertTrue(status == TermsEnum.SeekStatus.NOT_FOUND ||
 status == TermsEnum.SeekStatus.END);
@@ -549,11 +549,11 @@ public class TestCodecs extends LuceneTestCase {
 if (postings != null) {
 docs = postings;
 } else {
-docs = _TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_FREQS);
+docs = TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_FREQS);
 }
 } else {
 postings = null;
-docs = _TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE);
+docs = TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE);
 }
 assertNotNull(docs);
 int upto2 = -1;

@@ -32,7 +32,8 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.SimpleFSDirectory;
 import org.apache.lucene.store._TestHelper;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;


 public class TestCompoundFile extends LuceneTestCase
@@ -42,7 +43,7 @@ public class TestCompoundFile extends LuceneTestCase
 @Override
 public void setUp() throws Exception {
 super.setUp();
-File file = _TestUtil.getTempDir("testIndex");
+File file = TestUtil.getTempDir("testIndex");
 // use a simple FSDir here, to be sure to have SimpleFSInputs
 dir = new SimpleFSDirectory(file,null);
 }
@@ -775,7 +776,7 @@ public class TestCompoundFile extends LuceneTestCase
 // when reading a CFS with many subs:
 public void testManySubFiles() throws IOException {

-final Directory d = newFSDirectory(_TestUtil.getTempDir("CFSManySubFiles"));
+final Directory d = newFSDirectory(TestUtil.getTempDir("CFSManySubFiles"));
 final int FILE_COUNT = atLeast(500);

 for(int fileIdx=0;fileIdx<FILE_COUNT;fileIdx++) {
@@ -820,7 +821,7 @@ public class TestCompoundFile extends LuceneTestCase
 doc.add(bodyField);
 for (int i = 0; i < 100; i++) {
 idField.setStringValue(Integer.toString(i));
-bodyField.setStringValue(_TestUtil.randomUnicodeString(random()));
+bodyField.setStringValue(TestUtil.randomUnicodeString(random()));
 riw.addDocument(doc);
 if (random().nextInt(7) == 0) {
 riw.commit();

@@ -33,7 +33,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestConcurrentMergeScheduler extends LuceneTestCase {

@@ -265,8 +265,8 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
 Directory dir = newDirectory();
 IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));

-final int maxMergeCount = _TestUtil.nextInt(random(), 1, 5);
-final int maxMergeThreads = _TestUtil.nextInt(random(), 1, maxMergeCount);
+final int maxMergeCount = TestUtil.nextInt(random(), 1, 5);
+final int maxMergeThreads = TestUtil.nextInt(random(), 1, maxMergeCount);
 final CountDownLatch enoughMergesWaiting = new CountDownLatch(maxMergeCount);
 final AtomicInteger runningMergeCount = new AtomicInteger(0);
 final AtomicBoolean failed = new AtomicBoolean();
@@ -352,9 +352,9 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
 IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
 iwc.setMaxBufferedDocs(5);
 iwc.setMergeScheduler(new TrackingCMS());
-if (_TestUtil.getPostingsFormat("id").equals("SimpleText")) {
+if (TestUtil.getPostingsFormat("id").equals("SimpleText")) {
 // no
-iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
+iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
 }
 RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
 for(int i=0;i<1000;i++) {

@@ -32,7 +32,8 @@ import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestCrashCausesCorruptIndex extends LuceneTestCase {

@@ -42,7 +43,7 @@ public class TestCrashCausesCorruptIndex extends LuceneTestCase {
  * LUCENE-3627: This test fails.
  */
 public void testCrashCorruptsIndexing() throws Exception {
-path = _TestUtil.getTempDir("testCrashCorruptsIndexing");
+path = TestUtil.getTempDir("testCrashCorruptsIndexing");

 indexAndCrashOnCreateOutputSegments2();

@@ -35,7 +35,7 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 /*
 Verify we can read the pre-2.1 file format, do searches
@@ -235,7 +235,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
 writer.close();

 long lastDeleteTime = 0;
-final int targetNumDelete = _TestUtil.nextInt(random(), 1, 5);
+final int targetNumDelete = TestUtil.nextInt(random(), 1, 5);
 while (policy.numDelete < targetNumDelete) {
 // Record last time when writer performed deletes of
 // past commits

@@ -29,7 +29,6 @@ import java.util.Random;
 import java.util.Set;

 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
@@ -45,7 +44,7 @@ import org.apache.lucene.store.NoSuchDirectoryException;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.Assume;

 public class TestDirectoryReader extends LuceneTestCase {
@@ -94,18 +93,18 @@ public class TestDirectoryReader extends LuceneTestCase {
 // test mixing up TermDocs and TermEnums from different readers.
 TermsEnum te2 = MultiFields.getTerms(mr2, "body").iterator(null);
 te2.seekCeil(new BytesRef("wow"));
-DocsEnum td = _TestUtil.docs(random(), mr2,
-"body",
-te2.term(),
-MultiFields.getLiveDocs(mr2),
-null,
-0);
+DocsEnum td = TestUtil.docs(random(), mr2,
+"body",
+te2.term(),
+MultiFields.getLiveDocs(mr2),
+null,
+0);

 TermsEnum te3 = MultiFields.getTerms(mr3, "body").iterator(null);
 te3.seekCeil(new BytesRef("wow"));
-td = _TestUtil.docs(random(), te3, MultiFields.getLiveDocs(mr3),
-td,
-0);
+td = TestUtil.docs(random(), te3, MultiFields.getLiveDocs(mr3),
+td,
+0);

 int ret = 0;

@@ -352,12 +351,12 @@ void assertTermDocsCount(String msg,
 Term term,
 int expected)
 throws IOException {
-DocsEnum tdocs = _TestUtil.docs(random(), reader,
-term.field(),
-new BytesRef(term.text()),
-MultiFields.getLiveDocs(reader),
-null,
-0);
+DocsEnum tdocs = TestUtil.docs(random(), reader,
+term.field(),
+new BytesRef(term.text()),
+MultiFields.getLiveDocs(reader),
+null,
+0);
 int count = 0;
 if (tdocs != null) {
 while(tdocs.nextDoc()!= DocIdSetIterator.NO_MORE_DOCS) {
@@ -439,7 +438,7 @@ void assertTermDocsCount(String msg,

 public void testFilesOpenClose() throws IOException {
 // Create initial data set
-File dirFile = _TestUtil.getTempDir("TestIndexReader.testFilesOpenClose");
+File dirFile = TestUtil.getTempDir("TestIndexReader.testFilesOpenClose");
 Directory dir = newFSDirectory(dirFile);
 IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
 addDoc(writer, "test");
@@ -447,7 +446,7 @@ public void testFilesOpenClose() throws IOException {
 dir.close();

 // Try to erase the data - this ensures that the writer closed all files
-_TestUtil.rmDir(dirFile);
+TestUtil.rmDir(dirFile);
 dir = newFSDirectory(dirFile);

 // Now create the data set again, just as before
@@ -464,11 +463,11 @@ public void testFilesOpenClose() throws IOException {

 // The following will fail if reader did not close
 // all files
-_TestUtil.rmDir(dirFile);
+TestUtil.rmDir(dirFile);
 }

 public void testOpenReaderAfterDelete() throws IOException {
-File dirFile = _TestUtil.getTempDir("deletetest");
+File dirFile = TestUtil.getTempDir("deletetest");
 Directory dir = newFSDirectory(dirFile);
 try {
 DirectoryReader.open(dir);
@@ -716,8 +715,8 @@ public void testFilesOpenClose() throws IOException {
 // DirectoryReader on a non-existent directory, you get a
 // good exception
 public void testNoDir() throws Throwable {
-File tempDir = _TestUtil.getTempDir("doesnotexist");
-_TestUtil.rmDir(tempDir);
+File tempDir = TestUtil.getTempDir("doesnotexist");
+TestUtil.rmDir(tempDir);
 Directory dir = newFSDirectory(tempDir);
 try {
 DirectoryReader.open(dir);
@@ -1091,7 +1090,7 @@ public void testFilesOpenClose() throws IOException {
 }

 public void testIndexExistsOnNonExistentDirectory() throws Exception {
-File tempDir = _TestUtil.getTempDir("testIndexExistsOnNonExistentDirectory");
+File tempDir = TestUtil.getTempDir("testIndexExistsOnNonExistentDirectory");
 tempDir.delete();
 Directory dir = newFSDirectory(tempDir);
 assertFalse(DirectoryReader.indexExists(dir));

@@ -38,7 +38,7 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestDirectoryReaderReopen extends LuceneTestCase {

@@ -196,7 +196,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
 public void testThreadSafety() throws Exception {
 final Directory dir = newDirectory();
 // NOTE: this also controls the number of threads!
-final int n = _TestUtil.nextInt(random(), 20, 40);
+final int n = TestUtil.nextInt(random(), 20, 40);
 IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
 TEST_VERSION_CURRENT, new MockAnalyzer(random())));
 for (int i = 0; i < n; i++) {
@@ -275,7 +275,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
 }
 }
 synchronized(this) {
-wait(_TestUtil.nextInt(random(), 1, 100));
+wait(TestUtil.nextInt(random(), 1, 100));
 }
 }
 }
@@ -294,7 +294,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
 }

 synchronized(this) {
-wait(_TestUtil.nextInt(random(), 1, 100));
+wait(TestUtil.nextInt(random(), 1, 100));
 }
 }
 }

@@ -43,7 +43,7 @@ import org.apache.lucene.store.TrackingDirectoryWrapper;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;


 /** JUnit adaptation of an older test case DocTest. */
@@ -62,10 +62,10 @@ public class TestDoc extends LuceneTestCase {
 if (VERBOSE) {
 System.out.println("TEST: setUp");
 }
-workDir = _TestUtil.getTempDir("TestDoc");
+workDir = TestUtil.getTempDir("TestDoc");
 workDir.mkdirs();

-indexDir = _TestUtil.getTempDir("testIndex");
+indexDir = TestUtil.getTempDir("testIndex");
 indexDir.mkdirs();

 Directory directory = newFSDirectory(indexDir);

@@ -23,7 +23,7 @@ import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 /**
  * Tests the Terms.docCount statistic
@@ -49,9 +49,9 @@ public class TestDocCount extends LuceneTestCase {

 private Document doc() {
 Document doc = new Document();
-int numFields = _TestUtil.nextInt(random(), 1, 10);
+int numFields = TestUtil.nextInt(random(), 1, 10);
 for (int i = 0; i < numFields; i++) {
-doc.add(newStringField("" + _TestUtil.nextInt(random(), 'a', 'z'), "" + _TestUtil.nextInt(random(), 'a', 'z'), Field.Store.NO));
+doc.add(newStringField("" + TestUtil.nextInt(random(), 'a', 'z'), "" + TestUtil.nextInt(random(), 'a', 'z'), Field.Store.NO));
 }
 return doc;
 }
@@ -70,7 +70,7 @@ public class TestDocCount extends LuceneTestCase {
 FixedBitSet visited = new FixedBitSet(ir.maxDoc());
 TermsEnum te = terms.iterator(null);
 while (te.next() != null) {
-DocsEnum de = _TestUtil.docs(random(), te, null, null, DocsEnum.FLAG_NONE);
+DocsEnum de = TestUtil.docs(random(), te, null, null, DocsEnum.FLAG_NONE);
 while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
 visited.set(de.docID());
 }

@@ -38,7 +38,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 // TODO:
 // - test w/ del docs
@@ -97,7 +97,7 @@ public class TestDocTermOrds extends LuceneTestCase {
 final int NUM_TERMS = atLeast(20);
 final Set<BytesRef> terms = new HashSet<BytesRef>();
 while(terms.size() < NUM_TERMS) {
-final String s = _TestUtil.randomRealisticUnicodeString(random());
+final String s = TestUtil.randomRealisticUnicodeString(random());
 //final String s = _TestUtil.randomSimpleString(random);
 if (s.length() > 0) {
 terms.add(new BytesRef(s));
@@ -113,7 +113,7 @@ public class TestDocTermOrds extends LuceneTestCase {
 // Sometimes swap in codec that impls ord():
 if (random().nextInt(10) == 7) {
 // Make sure terms index has ords:
-Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds"));
+Codec codec = TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds"));
 conf.setCodec(codec);
 }

@@ -127,7 +127,7 @@ public class TestDocTermOrds extends LuceneTestCase {

 doc.add(new IntField("id", id, Field.Store.YES));

-final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER);
+final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
 while(ordsForDocSet.size() < termCount) {
 ordsForDocSet.add(random().nextInt(termsArray.length));
 }
@@ -182,12 +182,12 @@ public class TestDocTermOrds extends LuceneTestCase {
 Directory dir = newDirectory();

 final Set<String> prefixes = new HashSet<String>();
-final int numPrefix = _TestUtil.nextInt(random(), 2, 7);
+final int numPrefix = TestUtil.nextInt(random(), 2, 7);
 if (VERBOSE) {
 System.out.println("TEST: use " + numPrefix + " prefixes");
 }
 while(prefixes.size() < numPrefix) {
-prefixes.add(_TestUtil.randomRealisticUnicodeString(random()));
+prefixes.add(TestUtil.randomRealisticUnicodeString(random()));
 //prefixes.add(_TestUtil.randomSimpleString(random));
 }
 final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]);
@@ -195,7 +195,7 @@ public class TestDocTermOrds extends LuceneTestCase {
 final int NUM_TERMS = atLeast(20);
 final Set<BytesRef> terms = new HashSet<BytesRef>();
 while(terms.size() < NUM_TERMS) {
-final String s = prefixesArray[random().nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random());
+final String s = prefixesArray[random().nextInt(prefixesArray.length)] + TestUtil.randomRealisticUnicodeString(random());
 //final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomSimpleString(random);
 if (s.length() > 0) {
 terms.add(new BytesRef(s));
@@ -210,7 +210,7 @@ public class TestDocTermOrds extends LuceneTestCase {

 // Sometimes swap in codec that impls ord():
 if (random().nextInt(10) == 7) {
-Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds"));
+Codec codec = TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds"));
 conf.setCodec(codec);
 }

@@ -224,7 +224,7 @@ public class TestDocTermOrds extends LuceneTestCase {

 doc.add(new IntField("id", id, Field.Store.YES));

-final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER);
+final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
 while(ordsForDocSet.size() < termCount) {
 ordsForDocSet.add(random().nextInt(termsArray.length));
 }
@@ -303,7 +303,7 @@ public class TestDocTermOrds extends LuceneTestCase {
 "field",
 prefixRef,
 Integer.MAX_VALUE,
-_TestUtil.nextInt(random(), 2, 10));
+TestUtil.nextInt(random(), 2, 10));


 final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(r, "id", false);

@@ -18,7 +18,8 @@ package org.apache.lucene.index;
  */

 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;

 /** Tests the codec configuration defined by LuceneTestCase randomly
  * (typically a mix across different fields).
@@ -32,6 +33,6 @@ public class TestDocValuesFormat extends BaseDocValuesFormatTestCase {

 @Override
 protected boolean codecAcceptsHugeBinaryValues(String field) {
-return _TestUtil.fieldSupportsHugeBinaryDocValues(field);
+return TestUtil.fieldSupportsHugeBinaryDocValues(field);
 }
 }

@@ -34,7 +34,8 @@ import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestDocValuesWithThreads extends LuceneTestCase {

@@ -50,10 +51,10 @@ public class TestDocValuesWithThreads extends LuceneTestCase {
 Document d = new Document();
 long number = random().nextLong();
 d.add(new NumericDocValuesField("number", number));
-BytesRef bytes = new BytesRef(_TestUtil.randomRealisticUnicodeString(random()));
+BytesRef bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
 d.add(new BinaryDocValuesField("bytes", bytes));
 binary.add(bytes);
-bytes = new BytesRef(_TestUtil.randomRealisticUnicodeString(random()));
+bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
 d.add(new SortedDocValuesField("sorted", bytes));
 sorted.add(bytes);
 w.addDocument(d);
@@ -67,7 +68,7 @@ public class TestDocValuesWithThreads extends LuceneTestCase {
 assertEquals(1, r.leaves().size());
 final AtomicReader ar = r.leaves().get(0).reader();

-int numThreads = _TestUtil.nextInt(random(), 2, 5);
+int numThreads = TestUtil.nextInt(random(), 2, 5);
 List<Thread> threads = new ArrayList<Thread>();
 final CountDownLatch startingGun = new CountDownLatch(1);
 for(int t=0;t<numThreads;t++) {
@@ -143,9 +144,9 @@ public class TestDocValuesWithThreads extends LuceneTestCase {
 while (numDocs < NUM_DOCS) {
 final String s;
 if (random.nextBoolean()) {
-s = _TestUtil.randomSimpleString(random);
+s = TestUtil.randomSimpleString(random);
 } else {
-s = _TestUtil.randomUnicodeString(random);
+s = TestUtil.randomUnicodeString(random);
 }
 final BytesRef br = new BytesRef(s);

@@ -181,7 +182,7 @@ public class TestDocValuesWithThreads extends LuceneTestCase {

 final long END_TIME = System.currentTimeMillis() + (TEST_NIGHTLY ? 30 : 1);

-final int NUM_THREADS = _TestUtil.nextInt(random(), 1, 10);
+final int NUM_THREADS = TestUtil.nextInt(random(), 1, 10);
 Thread[] threads = new Thread[NUM_THREADS];
 for(int thread=0;thread<NUM_THREADS;thread++) {
 threads[thread] = new Thread() {

@@ -30,7 +30,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestDocsAndPositions extends LuceneTestCase {
 private String fieldName;
@@ -224,7 +224,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
 IndexReaderContext topReaderContext = reader.getContext();
 for (AtomicReaderContext context : topReaderContext.leaves()) {
 int maxDoc = context.reader().maxDoc();
-DocsEnum docsEnum = _TestUtil.docs(random(), context.reader(), fieldName, bytes, null, null, DocsEnum.FLAG_FREQS);
+DocsEnum docsEnum = TestUtil.docs(random(), context.reader(), fieldName, bytes, null, null, DocsEnum.FLAG_FREQS);
 if (findNext(freqInDoc, context.docBase, context.docBase + maxDoc) == Integer.MAX_VALUE) {
 assertNull(docsEnum);
 continue;
@@ -334,7 +334,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
 writer.addDocument(doc);
 DirectoryReader reader = writer.getReader();
 AtomicReader r = getOnlySegmentReader(reader);
-DocsEnum disi = _TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
+DocsEnum disi = TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
 int docid = disi.docID();
 assertEquals(-1, docid);
 assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -342,7 +342,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
 // now reuse and check again
 TermsEnum te = r.terms("foo").iterator(null);
 assertTrue(te.seekExact(new BytesRef("bar")));
-disi = _TestUtil.docs(random(), te, null, disi, DocsEnum.FLAG_NONE);
+disi = TestUtil.docs(random(), te, null, disi, DocsEnum.FLAG_NONE);
 docid = disi.docID();
 assertEquals(-1, docid);
 assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

@@ -18,7 +18,6 @@ package org.apache.lucene.index;
  */

 import java.io.IOException;
-import java.io.Reader;

 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -35,7 +34,8 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestDocumentWriter extends LuceneTestCase {
 private Directory dir;
@@ -283,7 +283,7 @@ public class TestDocumentWriter extends LuceneTestCase {
 writer.addDocument(doc);
 writer.close();

-_TestUtil.checkIndex(dir);
+TestUtil.checkIndex(dir);

 IndexReader reader = DirectoryReader.open(dir);
 // f1
@@ -324,7 +324,7 @@ public class TestDocumentWriter extends LuceneTestCase {
 writer.forceMerge(1); // be sure to have a single segment
 writer.close();

-_TestUtil.checkIndex(dir);
+TestUtil.checkIndex(dir);

 SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(dir));
 FieldInfos fi = reader.getFieldInfos();

@@ -30,7 +30,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 /**
  * Compares one codec against another
@@ -92,8 +92,8 @@ public class TestDuelingCodecs extends LuceneTestCase {
 rightWriter.close();

 // check that our readers are valid
-_TestUtil.checkReader(leftReader);
-_TestUtil.checkReader(rightReader);
+TestUtil.checkReader(leftReader);
+TestUtil.checkReader(rightReader);

 info = "left: " + leftCodec.toString() + " / right: " + rightCodec.toString();
 }

@@ -24,18 +24,9 @@ import java.util.*;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.BaseDirectory;
 import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
@@ -43,7 +34,7 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

@@ -200,7 +191,7 @@ public class TestFieldsReader extends LuceneTestCase {

 // LUCENE-1262
 public void testExceptions() throws Throwable {
-File indexDir = _TestUtil.getTempDir("testfieldswriterexceptions");
+File indexDir = TestUtil.getTempDir("testfieldswriterexceptions");

 try {
 Directory dir = new FaultyFSDirectory(indexDir);
@@ -236,7 +227,7 @@ public class TestFieldsReader extends LuceneTestCase {
 reader.close();
 dir.close();
 } finally {
-_TestUtil.rmDir(indexDir);
+TestUtil.rmDir(indexDir);
 }

 }

@@ -65,7 +65,7 @@ public class TestFlex extends LuceneTestCase {
 public void testTermOrd() throws Exception {
 Directory d = newDirectory();
 IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
-new MockAnalyzer(random())).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())));
+new MockAnalyzer(random())).setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat())));
 Document doc = new Document();
 doc.add(newTextField("f", "a b c", Field.Store.NO));
 w.addDocument(doc);

@@ -27,7 +27,8 @@ import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestForTooMuchCloning extends LuceneTestCase {

@@ -46,7 +47,7 @@ public class TestForTooMuchCloning extends LuceneTestCase {
 for(int docs=0;docs<numDocs;docs++) {
 StringBuilder sb = new StringBuilder();
 for(int terms=0;terms<100;terms++) {
-sb.append(_TestUtil.randomRealisticUnicodeString(random()));
+sb.append(TestUtil.randomRealisticUnicodeString(random()));
 sb.append(' ');
 }
 final Document doc = new Document();

@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestForceMergeForever extends LuceneTestCase {

@@ -57,7 +57,7 @@ public class TestForceMergeForever extends LuceneTestCase {
 final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));

 // Try to make an index that requires merging:
-w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random(), 2, 11));
+w.getConfig().setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 11));
 final int numStartDocs = atLeast(20);
 final LineFileDocs docs = new LineFileDocs(random(), true);
 for(int docIDX=0;docIDX<numStartDocs;docIDX++) {

@@ -18,7 +18,8 @@ package org.apache.lucene.index;
  */

 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.store.DataInput;
@@ -100,9 +101,9 @@ public class TestIndexInput extends LuceneTestCase {
 final long l1;
 if (rarely()) {
 // a long with lots of zeroes at the end
-l1 = LONGS[i] = _TestUtil.nextLong(random, 0, Integer.MAX_VALUE) << 32;
+l1 = LONGS[i] = TestUtil.nextLong(random, 0, Integer.MAX_VALUE) << 32;
 } else {
-l1 = LONGS[i] = _TestUtil.nextLong(random, 0, Long.MAX_VALUE);
+l1 = LONGS[i] = TestUtil.nextLong(random, 0, Long.MAX_VALUE);
 }
 bdo.writeVLong(l1);
 bdo.writeLong(l1);

@@ -18,7 +18,6 @@ package org.apache.lucene.index;
  */

 import java.io.IOException;
-import java.io.Reader;
 import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Arrays;
@ -71,8 +70,8 @@ import org.apache.lucene.util.BytesRef;
|
|||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.SetOnce;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.lucene.util.ThreadInterruptedException;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
import org.apache.lucene.util.automaton.Automaton;
|
||||
import org.apache.lucene.util.automaton.BasicAutomata;
|
||||
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
|
||||
|
@ -319,7 +318,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
doc.add(new Field("field", "aaa" + j, storedTextType));
|
||||
writer.addDocument(doc);
|
||||
_TestUtil.syncConcurrentMerges(writer);
|
||||
TestUtil.syncConcurrentMerges(writer);
|
||||
int flushCount = writer.getFlushCount();
|
||||
if (j == 1)
|
||||
lastFlushCount = flushCount;
|
||||
|
@ -378,7 +377,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
int lastFlushCount = -1;
|
||||
for(int j=1;j<52;j++) {
|
||||
writer.deleteDocuments(new Term("field", "aaa" + j));
|
||||
_TestUtil.syncConcurrentMerges(writer);
|
||||
TestUtil.syncConcurrentMerges(writer);
|
||||
int flushCount = writer.getFlushCount();
|
||||
|
||||
if (j == 1)
|
||||
|
@@ -554,12 +553,12 @@ public class TestIndexWriter extends LuceneTestCase {
     assertEquals(1, reader.numDocs());
     Term t = new Term("field", "a");
     assertEquals(1, reader.docFreq(t));
-    DocsEnum td = _TestUtil.docs(random(), reader,
-                                 "field",
-                                 new BytesRef("a"),
-                                 MultiFields.getLiveDocs(reader),
-                                 null,
-                                 DocsEnum.FLAG_FREQS);
+    DocsEnum td = TestUtil.docs(random(), reader,
+                                "field",
+                                new BytesRef("a"),
+                                MultiFields.getLiveDocs(reader),
+                                null,
+                                DocsEnum.FLAG_FREQS);
     td.nextDoc();
     assertEquals(128*1024, td.freq());
     reader.close();
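
For readers unfamiliar with the 4.x postings API used in this hunk: the returned DocsEnum is a DocIdSetIterator, and FLAG_FREQS asks for per-document term frequencies; the test expects freq() == 128*1024 because its single document was indexed with that many occurrences of the term. A typical consumption loop (an illustrative sketch, not code from the commit):

    // Illustrative: walk all live documents containing a term and read freqs.
    static void dumpPostings(IndexReader reader, Random random) throws IOException {
      DocsEnum de = TestUtil.docs(random, reader, "field", new BytesRef("a"),
                                  MultiFields.getLiveDocs(reader), null, DocsEnum.FLAG_FREQS);
      if (de == null) {
        return; // term absent from the index
      }
      int doc;
      while ((doc = de.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
        System.out.println("doc=" + doc + " freq=" + de.freq()); // valid: FLAG_FREQS requested
      }
    }
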
@@ -1145,7 +1144,7 @@ public class TestIndexWriter extends LuceneTestCase {
       }

       try {
-        _TestUtil.checkIndex(dir);
+        TestUtil.checkIndex(dir);
       } catch (Exception e) {
         failed = true;
         System.out.println("CheckIndex FAILED: unexpected exception");
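
TestUtil.checkIndex, renamed above, is a thin wrapper around Lucene's CheckIndex tool. In spirit it does roughly the following (a simplified sketch; the real helper also captures the tool's console output and includes it in the test failure):

    // Simplified sketch of the helper's behavior, not its actual implementation.
    static void checkIndexSketch(Directory dir) throws IOException {
      CheckIndex checker = new CheckIndex(dir);
      CheckIndex.Status status = checker.checkIndex();
      if (!status.clean) {
        throw new RuntimeException("CheckIndex reported corruption");
      }
    }
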
@@ -1311,12 +1310,12 @@ public class TestIndexWriter extends LuceneTestCase {


     // test that the terms were indexed.
-    assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

     ir.close();
     dir.close();
@@ -1469,7 +1468,7 @@ public class TestIndexWriter extends LuceneTestCase {
     // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
     // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
     // when listAll() was called in IndexFileDeleter.
-    Directory dir = newFSDirectory(_TestUtil.getTempDir("emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
+    Directory dir = newFSDirectory(TestUtil.getTempDir("emptyFSDirNoLock"), NoLockFactory.getNoLockFactory());
     new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))).close();
     dir.close();
   }
@@ -1589,7 +1588,7 @@ public class TestIndexWriter extends LuceneTestCase {

     indexWriter.close();

-    _TestUtil.checkIndex(dir);
+    TestUtil.checkIndex(dir);

     assertNoUnreferencedFiles(dir, "no tv files");
     DirectoryReader r0 = DirectoryReader.open(dir);
@@ -1776,7 +1775,7 @@ public class TestIndexWriter extends LuceneTestCase {
   }

   public void testWhetherDeleteAllDeletesWriteLock() throws Exception {
-    Directory d = newFSDirectory(_TestUtil.getTempDir("TestIndexWriter.testWhetherDeleteAllDeletesWriteLock"));
+    Directory d = newFSDirectory(TestUtil.getTempDir("TestIndexWriter.testWhetherDeleteAllDeletesWriteLock"));
     // Must use SimpleFSLockFactory... NativeFSLockFactory
     // somehow "knows" a lock is held against write.lock
     // even if you remove that file:
@@ -2025,7 +2024,7 @@ public class TestIndexWriter extends LuceneTestCase {

   // LUCENE-4398
   public void testRotatingFieldNames() throws Exception {
-    Directory dir = newFSDirectory(_TestUtil.getTempDir("TestIndexWriter.testChangingFields"));
+    Directory dir = newFSDirectory(TestUtil.getTempDir("TestIndexWriter.testChangingFields"));
     IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwc.setRAMBufferSizeMB(0.2);
     iwc.setMaxBufferedDocs(-1);
@@ -2146,7 +2145,7 @@ public class TestIndexWriter extends LuceneTestCase {
       for (int j = 0; j < numDocs; j++) {
         Document doc = new Document();
         doc.add(newField("id", ""+ (docId++), idFt));
-        doc.add(newField("foo", _TestUtil.randomSimpleString(random()), ft));
+        doc.add(newField("foo", TestUtil.randomSimpleString(random()), ft));
         docs.add(doc);
       }
       boolean success = false;
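
randomSimpleString, used for the "foo" field above, produces short plain-ASCII terms, which keeps the test focused on indexing mechanics rather than Unicode edge cases. An approximate sketch of its behavior (the real helper's length bounds and character range may differ):

    import java.util.Random;

    // Approximate sketch: short lowercase-ASCII strings, possibly empty.
    class RandomSimpleStringSketch {
      static String randomSimpleString(Random r) {
        char[] chars = new char[r.nextInt(10)];
        for (int i = 0; i < chars.length; i++) {
          chars[i] = (char) ('a' + r.nextInt(26));
        }
        return new String(chars);
      }
    }
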
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
  */

 import java.io.IOException;
-import java.io.Reader;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;

@@ -33,7 +32,7 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.TestUtil;

 public class TestIndexWriterCommit extends LuceneTestCase {
   /*
@@ -178,8 +177,8 @@ public class TestIndexWriterCommit extends LuceneTestCase {
     // sum because the merged FST may use array encoding for
     // some arcs (which uses more space):

-    final String idFormat = _TestUtil.getPostingsFormat("id");
-    final String contentFormat = _TestUtil.getPostingsFormat("content");
+    final String idFormat = TestUtil.getPostingsFormat("id");
+    final String contentFormat = TestUtil.getPostingsFormat("content");
     assumeFalse("This test cannot run with Memory codec", idFormat.equals("Memory") || contentFormat.equals("Memory"));
     MockDirectoryWrapper dir = newMockDirectory();
     Analyzer analyzer;
@@ -330,7 +329,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    _TestUtil.reduceOpenFiles(w.w);
+    TestUtil.reduceOpenFiles(w.w);
     w.commit();
     final AtomicBoolean failed = new AtomicBoolean();
     Thread[] threads = new Thread[NUM_THREADS];
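
TestUtil.reduceOpenFiles, renamed in the hunk above, tunes the writer so a multi-threaded commit test does not exhaust file handles. In spirit it does something like the following (a simplified sketch; the real helper also covers TieredMergePolicy and compound-file settings):

    // Simplified sketch of the helper's intent, not its actual implementation.
    static void reduceOpenFiles(IndexWriter w) {
      MergePolicy mp = w.getConfig().getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        LogMergePolicy lmp = (LogMergePolicy) mp;
        lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor())); // merge fewer segments at once
      }
    }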