LUCENE-8626: Lucene standardize test naming part 3 and final (#2220)

Author: Marcus, 2021-01-22 12:38:52 -05:00 (committed via GitHub)
Parent: 32e95ddb3f
Commit: 4bc5d51494
88 changed files with 115 additions and 115 deletions
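The renames below are purely mechanical: each remaining test class moves from the FooTest suffix convention to the TestFoo prefix convention already used by most of the Lucene test suite, and call sites referring to the old names are updated in the same commit. A minimal before/after sketch of the pattern, using one class from the diff below (the base class and the class body are untouched by the rename):

    // Before: suffix naming, retired by this commit
    class DelimitedBoostTokenFilterTest extends BaseTokenStreamTestCase { }

    // After: prefix naming, consistent with the rest of the test suite
    class TestDelimitedBoostTokenFilter extends BaseTokenStreamTestCase { }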

View File

@@ -21,7 +21,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.search.BoostAttribute;
-public class DelimitedBoostTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestDelimitedBoostTokenFilter extends BaseTokenStreamTestCase {
public void testBoosts() throws Exception {
String test = "The quick|0.4 red|0.5 fox|0.2 jumped|0.1 over the lazy|0.8 brown|0.9 dogs|0.9";

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.TestUtil;
-public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
+public class TestHTMLStripCharFilter extends BaseTokenStreamTestCase {
private static Analyzer newTestAnalyzer() {
return new Analyzer() {

View File

@@ -28,7 +28,7 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/** Tests CommonGrams(Query)Filter */
-public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
+public class TestCommonGramsFilter extends BaseTokenStreamTestCase {
private static final CharArraySet commonWords =
new CharArraySet(Arrays.asList("s", "a", "b", "c", "d", "the", "of"), false);

View File

@@ -20,7 +20,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
/** A unit test class for verifying the correct operation of the GreekAnalyzer. */
-public class GreekAnalyzerTest extends BaseTokenStreamTestCase {
+public class TestGreekAnalyzer extends BaseTokenStreamTestCase {
/**
* Test the analysis of various greek strings.

View File

@@ -35,7 +35,7 @@ import org.apache.lucene.util.automaton.RegExp;
import org.junit.Test;
/** Tests for {@link MinHashFilter} */
-public class MinHashFilterTest extends BaseTokenStreamTestCase {
+public class TestMinHashFilter extends BaseTokenStreamTestCase {
@Test
public void testIntHash() {

View File

@@ -23,7 +23,7 @@ import java.util.Locale;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
-public class DateRecognizerFilterTest extends BaseTokenStreamTestCase {
+public class TestDateRecognizerFilter extends BaseTokenStreamTestCase {
public void test() throws IOException {
final String test =

View File

@@ -20,7 +20,7 @@ import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-public class DateRecognizerFilterFactoryTest extends BaseTokenStreamTestCase {
+public class TestDateRecognizerFilterFactory extends BaseTokenStreamTestCase {
public void testBadLanguageTagThrowsException() {
expectThrows(

View File

@@ -22,7 +22,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute;
-public class DelimitedTermFrequencyTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestDelimitedTermFrequencyTokenFilter extends BaseTokenStreamTestCase {
public void testTermFrequency() throws Exception {
String test = "The quick|40 red|4 fox|06 jumped|1 over the lazy|2 brown|123 dogs|1024";

View File

@@ -35,7 +35,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.util.TestUtil;
/** Tests {@link EdgeNGramTokenFilter} for correctness. */
-public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestEdgeNGramTokenFilter extends BaseTokenStreamTestCase {
private TokenStream input;
@Override

View File

@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.TestUtil;
/** Tests {@link EdgeNGramTokenizer} for correctness. */
-public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
+public class TestEdgeNGramTokenizer extends BaseTokenStreamTestCase {
private StringReader input;
@Override
@@ -146,7 +146,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
private static void testNGrams(int minGram, int maxGram, String s, String nonTokenChars)
throws IOException {
-NGramTokenizerTest.testNGrams(minGram, maxGram, s, nonTokenChars, true);
+TestNGramTokenizer.testNGrams(minGram, maxGram, s, nonTokenChars, true);
}
public void testLargeInput() throws IOException {

View File

@@ -34,7 +34,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.util.TestUtil;
/** Tests {@link NGramTokenFilter} for correctness. */
-public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestNGramTokenFilter extends BaseTokenStreamTestCase {
private TokenStream input;
@Override

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.TestUtil;
/** Tests {@link NGramTokenizer} for correctness. */
-public class NGramTokenizerTest extends BaseTokenStreamTestCase {
+public class TestNGramTokenizer extends BaseTokenStreamTestCase {
private StringReader input;
@Override

View File

@@ -23,7 +23,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;
-public class DelimitedPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestDelimitedPayloadTokenFilter extends BaseTokenStreamTestCase {
public void testPayloads() throws Exception {
String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";

View File

@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-public class NumericPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestNumericPayloadTokenFilter extends BaseTokenStreamTestCase {
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";

View File

@@ -22,7 +22,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;
-public class TokenOffsetPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestTokenOffsetPayloadTokenFilter extends BaseTokenStreamTestCase {
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";

View File

@@ -24,7 +24,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-public class TypeAsPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestTypeAsPayloadTokenFilter extends BaseTokenStreamTestCase {
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";

View File

@@ -29,7 +29,7 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
-public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
+public class TestQueryAutoStopWordAnalyzer extends BaseTokenStreamTestCase {
String variedFieldValues[] = {
"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"
};

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings;
-public class FixedShingleFilterTest extends BaseTokenStreamTestCase {
+public class TestFixedShingleFilter extends BaseTokenStreamTestCase {
public void testBiGramFilter() throws IOException {

View File

@@ -44,7 +44,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
/** A test class for ShingleAnalyzerWrapper as regards queries and scoring. */
-public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
+public class TestShingleAnalyzerWrapper extends BaseTokenStreamTestCase {
private Analyzer analyzer;
private IndexSearcher searcher;
private IndexReader reader;

View File

@@ -30,7 +30,7 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-public class ShingleFilterTest extends BaseTokenStreamTestCase {
+public class TestShingleFilter extends BaseTokenStreamTestCase {
public static final Token[] TEST_TOKEN =
new Token[] {

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
/** Basic Tests for {@link WikipediaTokenizer} */
-public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
+public class TestWikipediaTokenizer extends BaseTokenStreamTestCase {
protected static final String LINK_PHRASES =
"click [[link here again]] click [http://lucene.apache.org here again] [[Category:a b c d]]";

View File

@@ -35,7 +35,7 @@ import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.IntsRefFSTEnum;
/** Tests of TokenInfoDictionary build tools; run using ant test-tools */
-public class TokenInfoDictionaryTest extends LuceneTestCase {
+public class TestTokenInfoDictionary extends LuceneTestCase {
public void testPut() throws Exception {
TokenInfoDictionary dict =

View File

@@ -22,7 +22,7 @@ import org.apache.lucene.analysis.ja.TestJapaneseTokenizer;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class UserDictionaryTest extends LuceneTestCase {
+public class TestUserDictionary extends LuceneTestCase {
@Test
public void testLookup() throws IOException {

View File

@@ -19,7 +19,7 @@ package org.apache.lucene.analysis.ja.util;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class UnknownDictionaryTest extends LuceneTestCase {
+public class TestUnknownDictionary extends LuceneTestCase {
public static final String FILENAME = "unk-tokeninfo-dict.obj";
@Test

View File

@@ -34,7 +34,7 @@ import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.IntsRefFSTEnum;
/** Tests of TokenInfoDictionary build tools; run using ant test-tools */
-public class TokenInfoDictionaryTest extends LuceneTestCase {
+public class TestTokenInfoDictionary extends LuceneTestCase {
public void testPut() throws Exception {
TokenInfoDictionary dict =

View File

@@ -23,7 +23,7 @@ import org.apache.lucene.analysis.ko.TestKoreanTokenizer;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class UserDictionaryTest extends LuceneTestCase {
+public class TestUserDictionary extends LuceneTestCase {
@Test
public void testLookup() throws IOException {
UserDictionary dictionary = TestKoreanTokenizer.readDict();

View File

@@ -19,7 +19,7 @@ package org.apache.lucene.analysis.ko.util;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class UnknownDictionaryTest extends LuceneTestCase {
+public class TestUnknownDictionary extends LuceneTestCase {
@Test
public void testPutCharacterCategory() {

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.util.TestUtil;
-public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
+public class TestDoubleMetaphoneFilter extends BaseTokenStreamTestCase {
public void testSize4FalseInject() throws Exception {
TokenStream stream = whitespaceMockTokenizer("international");

View File

@@ -38,7 +38,7 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.IOUtils;
/** Tests the functionality of {@link DocMaker}. */
-public class DocMakerTest extends BenchmarkTestCase {
+public class TestDocMaker extends BenchmarkTestCase {
public static final class OneDocSource extends ContentSource {

View File

@@ -26,7 +26,7 @@ import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class EnwikiContentSourceTest extends LuceneTestCase {
+public class TestEnwikiContentSource extends LuceneTestCase {
/** An EnwikiContentSource which works on a String and not files. */
private static class StringableEnwikiSource extends EnwikiContentSource {

View File

@@ -45,7 +45,7 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.IOUtils;
/** Tests the functionality of {@link LineDocSource}. */
-public class LineDocSourceTest extends BenchmarkTestCase {
+public class TestLineDocSource extends BenchmarkTestCase {
private static final CompressorStreamFactory csFactory = new CompressorStreamFactory();

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.document.DateTools;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
-public class TrecContentSourceTest extends LuceneTestCase {
+public class TestTrecContentSource extends LuceneTestCase {
/** A TrecDocMaker which works on a String and not files. */
private static class StringableTrecSource extends TrecContentSource {

View File

@@ -33,7 +33,7 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;
/** Tests the functionality of {@link AddIndexesTask}. */
-public class AddIndexesTaskTest extends BenchmarkTestCase {
+public class TestAddIndexesTask extends BenchmarkTestCase {
private static Path testDir, inputDir;

View File

@@ -24,7 +24,7 @@ import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.util.Version;
/** Tests the functionality of {@link CreateIndexTask}. */
-public class CommitIndexTaskTest extends BenchmarkTestCase {
+public class TestCommitIndexTask extends BenchmarkTestCase {
private PerfRunData createPerfRunData() throws Exception {
Properties props = new Properties();

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.index.NoMergeScheduler;
import org.apache.lucene.util.Version;
/** Tests the functionality of {@link CreateIndexTask}. */
-public class CreateIndexTaskTest extends BenchmarkTestCase {
+public class TestCreateIndexTask extends BenchmarkTestCase {
private PerfRunData createPerfRunData(String infoStreamValue) throws Exception {
Properties props = new Properties();

View File

@@ -22,7 +22,7 @@ import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.utils.Config;
/** Tests the functionality of the abstract {@link PerfTask}. */
-public class PerfTaskTest extends BenchmarkTestCase {
+public class TestPerfTask extends BenchmarkTestCase {
private static final class MyPerfTask extends PerfTask {

View File

@@ -22,7 +22,7 @@ import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.search.SortField;
-public class SearchWithSortTaskTest extends BenchmarkTestCase {
+public class TestSearchWithSortTask extends BenchmarkTestCase {
public void testSetParams_docField() throws Exception {
SearchWithSortTask task = new SearchWithSortTask(new PerfRunData(new Config(new Properties())));

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
/** Tests the functionality of {@link WriteEnwikiLineDocTask}. */
-public class WriteEnwikiLineDocTaskTest extends BenchmarkTestCase {
+public class TestWriteEnwikiLineDocTask extends BenchmarkTestCase {
// class has to be public so that Class.forName.newInstance() will work
/** Interleaves category docs with regular docs */
@@ -72,7 +72,7 @@ public class WriteEnwikiLineDocTaskTest extends BenchmarkTestCase {
throws Exception {
try (BufferedReader br = Files.newBufferedReader(file, StandardCharsets.UTF_8)) {
String line = br.readLine();
-WriteLineDocTaskTest.assertHeaderLine(line);
+TestWriteLineDocTask.assertHeaderLine(line);
for (int i = 0; i < n; i++) {
line = br.readLine();
assertNotNull(line);

View File

@@ -36,7 +36,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
/** Tests the functionality of {@link WriteLineDocTask}. */
-public class WriteLineDocTaskTest extends BenchmarkTestCase {
+public class TestWriteLineDocTask extends BenchmarkTestCase {
// class has to be public so that Class.forName.newInstance() will work
public static final class WriteLineDocMaker extends DocMaker {

View File

@@ -20,7 +20,7 @@ import org.apache.lucene.benchmark.BenchmarkTestCase;
import org.apache.lucene.benchmark.byTask.Benchmark;
/** Tests that tasks in alternate packages are found. */
-public class AltPackageTaskTest extends BenchmarkTestCase {
+public class TestAltPackageTask extends BenchmarkTestCase {
/** Benchmark should fail loading the algorithm when alt is not specified */
public void testWithoutAlt() throws Exception {

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.benchmark.BenchmarkTestCase;
import org.junit.Before;
import org.junit.Test;
-public class StreamUtilsTest extends BenchmarkTestCase {
+public class TestStreamUtils extends BenchmarkTestCase {
private static final String TEXT = "Some-Text...";
private Path testDir;

View File

@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Tests for {@link BM25NBClassifier} */
-public class BM25NBClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestBM25NBClassifier extends ClassificationTestBase<BytesRef> {
@Test
public void testBasicUsage() throws Exception {

View File

@@ -28,7 +28,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Testcase for {@link org.apache.lucene.classification.BooleanPerceptronClassifier} */
-public class BooleanPerceptronClassifierTest extends ClassificationTestBase<Boolean> {
+public class TestBooleanPerceptronClassifier extends ClassificationTestBase<Boolean> {
@Test
public void testBasicUsage() throws Exception {

View File

@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Testcase for {@link org.apache.lucene.classification.CachingNaiveBayesClassifier} */
-public class CachingNaiveBayesClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestCachingNaiveBayesClassifier extends ClassificationTestBase<BytesRef> {
@Test
public void testBasicUsage() throws Exception {

View File

@@ -28,7 +28,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Tests for {@link KNearestFuzzyClassifier} */
-public class KNearestFuzzyClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestKNearestFuzzyClassifier extends ClassificationTestBase<BytesRef> {
@Test
public void testBasicUsage() throws Exception {

View File

@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Testcase for {@link KNearestNeighborClassifier} */
-public class KNearestNeighborClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestKNearestNeighborClassifier extends ClassificationTestBase<BytesRef> {
@Test
public void testBasicUsage() throws Exception {

View File

@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Testcase for {@link SimpleNaiveBayesClassifier} */
-public class SimpleNaiveBayesClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestSimpleNaiveBayesClassifier extends ClassificationTestBase<BytesRef> {
@Test
public void testBasicUsage() throws Exception {

View File

@@ -23,7 +23,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Tests for {@link org.apache.lucene.classification.KNearestNeighborClassifier} */
-public class KNearestNeighborDocumentClassifierTest
+public class TestKNearestNeighborDocumentClassifier
extends DocumentClassificationTestBase<BytesRef> {
@Test

View File

@@ -20,7 +20,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Tests for {@link org.apache.lucene.classification.SimpleNaiveBayesClassifier} */
-public class SimpleNaiveBayesDocumentClassifierTest
+public class TestSimpleNaiveBayesDocumentClassifier
extends DocumentClassificationTestBase<BytesRef> {
@Test

View File

@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
import org.junit.Test;
/** Tests for {@link ConfusionMatrixGenerator} */
-public class ConfusionMatrixGeneratorTest extends ClassificationTestBase<Object> {
+public class TestConfusionMatrixGenerator extends ClassificationTestBase<Object> {
@Test
public void testGetConfusionMatrix() throws Exception {

View File

@@ -39,7 +39,7 @@ import org.junit.Test;
/** Testcase for {@link org.apache.lucene.classification.utils.DatasetSplitter} */
@LuceneTestCase.SuppressCodecs("SimpleText")
-public class DataSplitterTest extends LuceneTestCase {
+public class TestDataSplitter extends LuceneTestCase {
private LeafReader originalIndex;
private RandomIndexWriter indexWriter;

View File

@@ -34,7 +34,7 @@ import org.junit.Before;
import org.junit.Test;
/** Testcase for {@link org.apache.lucene.classification.utils.DocToDoubleVectorUtils} */
-public class DocToDoubleVectorUtilsTest extends LuceneTestCase {
+public class TestDocToDoubleVectorUtils extends LuceneTestCase {
private IndexReader index;
private Directory dir;

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.junit.Test;
-public class FuzzyTermOnShortTermsTest extends LuceneTestCase {
+public class TestFuzzyTermOnShortTerms extends LuceneTestCase {
private static final String FIELD = "field";
@Test

View File

@@ -46,7 +46,7 @@ import org.apache.lucene.util.RamUsageTester;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.automaton.ByteRunAutomaton;
-public class TermInSetQueryTest extends LuceneTestCase {
+public class TestTermInSetQuery extends LuceneTestCase {
public void testDuel() throws IOException {
final int iters = atLeast(2);

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.fst.Util;
/** Test for {@link FiniteStringsIterator}. */
-public class FiniteStringsIteratorTest extends LuceneTestCase {
+public class TestFiniteStringsIterator extends LuceneTestCase {
public void testRandomFiniteStrings1() {
int numStrings = atLeast(100);
if (VERBOSE) {

View File

@@ -16,7 +16,7 @@
*/
package org.apache.lucene.util.automaton;
-import static org.apache.lucene.util.automaton.FiniteStringsIteratorTest.getFiniteStrings;
+import static org.apache.lucene.util.automaton.TestFiniteStringsIterator.getFiniteStrings;
import java.util.List;
import org.apache.lucene.util.IntsRef;
@@ -26,7 +26,7 @@ import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.fst.Util;
/** Test for {@link FiniteStringsIterator}. */
-public class LimitedFiniteStringsIteratorTest extends LuceneTestCase {
+public class TestLimitedFiniteStringsIterator extends LuceneTestCase {
public void testRandomFiniteStrings() {
// Just makes sure we can run on any random finite
// automaton:

View File

@@ -28,7 +28,7 @@ import java.util.stream.Collectors;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class SimpleIniFileTest extends LuceneTestCase {
+public class TestSimpleIniFile extends LuceneTestCase {
@Test
public void testStore() throws IOException {

View File

@@ -32,7 +32,7 @@ import org.apache.lucene.luke.models.LukeException;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class AnalysisImplTest extends LuceneTestCase {
+public class TestAnalysisImpl extends LuceneTestCase {
@Test
public void testGetPresetAnalyzerTypes() throws Exception {

View File

@@ -37,7 +37,7 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-public class CommitsImplTest extends LuceneTestCase {
+public class TestCommitsImpl extends LuceneTestCase {
private DirectoryReader reader;

View File

@@ -33,7 +33,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.junit.Test;
-public class DocValuesAdapterTest extends DocumentsTestBase {
+public class TestDocValuesAdapter extends DocumentsTestBase {
@Override
protected void createIndex() throws IOException {

View File

@@ -34,7 +34,7 @@ import org.junit.Test;
"FastCompressingStoredFieldsData",
"FastDecompressionCompressingStoredFieldsData"
})
-public class DocumentsImplTest extends DocumentsTestBase {
+public class TestDocumentsImpl extends DocumentsTestBase {
@Test
public void testGetMaxDoc() {

View File

@@ -27,7 +27,7 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.junit.Test;
-public class TermVectorsAdapterTest extends DocumentsTestBase {
+public class TestTermVectorsAdapter extends DocumentsTestBase {
@Override
protected void createIndex() throws IOException {

View File

@@ -25,7 +25,7 @@ import java.util.Map;
import org.apache.lucene.store.AlreadyClosedException;
import org.junit.Test;
-public class OverviewImplTest extends OverviewTestBase {
+public class TestOverviewImpl extends OverviewTestBase {
@Test
public void testGetIndexPath() {

View File

@@ -22,7 +22,7 @@ import java.util.Arrays;
import java.util.Map;
import org.junit.Test;
-public class TermCountsTest extends OverviewTestBase {
+public class TestTermCounts extends OverviewTestBase {
@Test
public void testNumTerms() throws Exception {

View File

@@ -20,7 +20,7 @@ package org.apache.lucene.luke.models.overview;
import java.util.List;
import org.junit.Test;
-public class TopTermsTest extends OverviewTestBase {
+public class TestTopTerms extends OverviewTestBase {
@Test
public void testGetTopTerms() throws Exception {

View File

@@ -51,7 +51,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class SearchImplTest extends LuceneTestCase {
+public class TestSearchImpl extends LuceneTestCase {
private IndexReader reader;
private Directory dir;

View File

@@ -36,7 +36,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
/** Test of the SweetSpotSimilarity */
-public class SweetSpotSimilarityTest extends LuceneTestCase {
+public class TestSweetSpotSimilarity extends LuceneTestCase {
private static float computeNorm(Similarity sim, String field, int length) throws IOException {
String value = IntStream.range(0, length).mapToObj(i -> "a").collect(Collectors.joining(" "));

View File

@@ -26,7 +26,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.junit.Rule;
import org.junit.rules.TestRule;
-public class WindowsDirectoryTest extends LuceneTestCase {
+public class TestWindowsDirectory extends LuceneTestCase {
@Rule
public static TestRule requiresNative =
new NativeLibEnableRule(EnumSet.of(NativeLibEnableRule.OperatingSystem.WINDOWS));

View File

@@ -70,5 +70,5 @@ WildCardTermEnum (correct name?) should be no problem.
Warnings about missing terms are sent to System.out, this might
be replaced by another stream, and tested for in the tests.
-BooleanQueryTst.TestCollector uses a results checking method that should
+TestBooleanQuery.TestCollector uses a results checking method that should
be replaced by the checking method from Lucene's TestBasics.java.

View File

@@ -57,7 +57,7 @@ public class Test01Exceptions extends LuceneTestCase {
};
public void test01Exceptions() throws Exception {
-String m = ExceptionQueryTst.getFailQueries(exceptionQueries, verbose);
+String m = TestExceptionQuery.getFailQueries(exceptionQueries, verbose);
if (m.length() > 0) {
fail("No ParseException for:\n" + m);
}

View File

@@ -46,11 +46,11 @@ public class Test02Boolean extends LuceneTestCase {
SingleFieldTestDb db1;
public void normalTest1(String query, int[] expdnrs) throws Exception {
-BooleanQueryTst bqt =
-new BooleanQueryTst(
+TestBooleanQuery tbq =
+new TestBooleanQuery(
query, expdnrs, db1, fieldName, this, new BasicQueryFactory(maxBasicQueries));
-bqt.setVerbose(verbose);
-bqt.doTest();
+tbq.setVerbose(verbose);
+tbq.doTest();
}
public void test02Terms01() throws Exception {

View File

@@ -41,7 +41,7 @@ public class Test03Distance extends LuceneTestCase {
};
public void test00Exceptions() throws Exception {
-String m = ExceptionQueryTst.getFailQueries(exceptionQueries, verbose);
+String m = TestExceptionQuery.getFailQueries(exceptionQueries, verbose);
if (m.length() > 0) {
fail("No ParseException for:\n" + m);
}
@@ -68,11 +68,11 @@ public class Test03Distance extends LuceneTestCase {
}
private void distanceTst(String query, int[] expdnrs, SingleFieldTestDb db) throws Exception {
-BooleanQueryTst bqt =
-new BooleanQueryTst(
+TestBooleanQuery tbq =
+new TestBooleanQuery(
query, expdnrs, db, fieldName, this, new BasicQueryFactory(maxBasicQueries));
-bqt.setVerbose(verbose);
-bqt.doTest();
+tbq.setVerbose(verbose);
+tbq.doTest();
}
public void distanceTest1(String query, int[] expdnrs) throws Exception {

View File

@@ -28,7 +28,7 @@ import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.SimpleCollector;
import org.junit.Assert;
-public class BooleanQueryTst {
+public class TestBooleanQuery {
String queryText;
final int[] expectedDocNrs;
SingleFieldTestDb dBase;
@@ -37,7 +37,7 @@ public class BooleanQueryTst {
BasicQueryFactory qf;
boolean verbose = true;
-public BooleanQueryTst(
+public TestBooleanQuery(
String queryText,
int[] expectedDocNrs,
SingleFieldTestDb dBase,

View File

@@ -19,11 +19,11 @@ package org.apache.lucene.queryparser.surround.query;
import org.apache.lucene.queryparser.surround.parser.ParseException;
import org.apache.lucene.queryparser.surround.parser.QueryParser;
-public class ExceptionQueryTst {
+public class TestExceptionQuery {
private String queryText;
private boolean verbose;
-public ExceptionQueryTst(String queryText, boolean verbose) {
+public TestExceptionQuery(String queryText, boolean verbose) {
this.queryText = queryText;
this.verbose = verbose;
}
@@ -53,7 +53,7 @@ public class ExceptionQueryTst {
public static String getFailQueries(String[] exceptionQueries, boolean verbose) {
StringBuilder failQueries = new StringBuilder();
for (int i = 0; i < exceptionQueries.length; i++) {
-new ExceptionQueryTst(exceptionQueries[i], verbose).doTest(failQueries);
+new TestExceptionQuery(exceptionQueries[i], verbose).doTest(failQueries);
}
return failQueries.toString();
}

View File

@@ -23,7 +23,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
/** */
-public class SrndQueryTest extends LuceneTestCase {
+public class TestSrndQuery extends LuceneTestCase {
void checkEqualParsings(String s1, String s2) throws Exception {
String fieldName = "foo";

View File

@@ -48,7 +48,7 @@ import org.junit.Test;
// See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows
// machines occasionally
-public class DocumentDictionaryTest extends LuceneTestCase {
+public class TestDocumentDictionary extends LuceneTestCase {
static final String FIELD_NAME = "f1";
static final String WEIGHT_FIELD_NAME = "w1";

View File

@@ -49,7 +49,7 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
-public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
+public class TestDocumentValueSourceDictionary extends LuceneTestCase {
static final String FIELD_NAME = "f1";
static final String WEIGHT_FIELD_NAME_1 = "w1";

View File

@@ -29,7 +29,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.junit.Test;
-public class FileDictionaryTest extends LuceneTestCase {
+public class TestFileDictionary extends LuceneTestCase {
private Map.Entry<List<String>, String> generateFileEntry(
String fieldDelimiter, boolean hasWeight, boolean hasPayload) {

View File

@@ -49,7 +49,7 @@ import org.junit.Ignore;
/** Benchmarks tests for implementations of {@link Lookup} interface. */
@Ignore("COMMENT ME TO RUN BENCHMARKS!")
-public class LookupBenchmarkTest extends LuceneTestCase {
+public class TestLookupBenchmark extends LuceneTestCase {
@SuppressWarnings({"unchecked", "deprecation"})
private final List<Class<? extends Lookup>> benchmarkClasses =
Arrays.asList(
@@ -83,9 +83,9 @@ public class LookupBenchmarkTest extends LuceneTestCase {
assert false : "disable assertions before running benchmarks!";
List<Input> input = readTop50KWiki();
Collections.shuffle(input, random);
-LookupBenchmarkTest.dictionaryInput = input.toArray(new Input[input.size()]);
+TestLookupBenchmark.dictionaryInput = input.toArray(new Input[input.size()]);
Collections.shuffle(input, random);
-LookupBenchmarkTest.benchmarkInput = input;
+TestLookupBenchmark.benchmarkInput = input;
}
static final Charset UTF_8 = StandardCharsets.UTF_8;
@@ -93,7 +93,7 @@ public class LookupBenchmarkTest extends LuceneTestCase {
/** Collect the multilingual input for benchmarks/ tests. */
public static List<Input> readTop50KWiki() throws Exception {
List<Input> input = new ArrayList<>();
-URL resource = LookupBenchmarkTest.class.getResource("Top50KWiki.utf8");
+URL resource = TestLookupBenchmark.class.getResource("Top50KWiki.utf8");
assert resource != null : "Resource missing: Top50KWiki.utf8";
String line = null;

View File

@@ -28,7 +28,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
-public class PersistenceTest extends LuceneTestCase {
+public class TestPersistence extends LuceneTestCase {
public final String[] keys =
new String[] {
"one",

View File

@@ -49,7 +49,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.junit.Test;
-public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
+public class TestAnalyzingInfixSuggester extends LuceneTestCase {
public void testBasic() throws Exception {
Input keys[] =

View File

@@ -57,7 +57,7 @@ import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
-public class AnalyzingSuggesterTest extends LuceneTestCase {
+public class TestAnalyzingSuggester extends LuceneTestCase {
/** this is basically the WFST test ported to KeywordAnalyzer. so it acts the same */
public void testKeyword() throws Exception {

View File

@@ -35,7 +35,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
-public class BlendedInfixSuggesterTest extends LuceneTestCase {
+public class TestBlendedInfixSuggester extends LuceneTestCase {
/** Test the weight transformation depending on the position of the matching term. */
public void testBlendedSort() throws IOException {

View File

@@ -51,7 +51,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
public void testBasic() throws Exception {
Iterable<Input> keys =
-AnalyzingSuggesterTest.shuffle(
+TestAnalyzingSuggester.shuffle(
new Input("foo bar baz blah", 50), new Input("boo foo bar foo bee", 20));
Analyzer a = new MockAnalyzer(random());
@@ -96,7 +96,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
public void testIllegalByteDuringBuild() throws Exception {
// Default separator is INFORMATION SEPARATOR TWO
// (0x1e), so no input token is allowed to contain it
-Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo\u001ebar baz", 50));
+Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo\u001ebar baz", 50));
Analyzer analyzer = new MockAnalyzer(random());
FreeTextSuggester sug = new FreeTextSuggester(analyzer);
expectThrows(
@@ -111,7 +111,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
public void testIllegalByteDuringQuery() throws Exception {
// Default separator is INFORMATION SEPARATOR TWO
// (0x1e), so no input token is allowed to contain it
-Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar baz", 50));
+Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo bar baz", 50));
Analyzer analyzer = new MockAnalyzer(random());
FreeTextSuggester sug = new FreeTextSuggester(analyzer);
sug.build(new InputArrayIterator(keys));
@@ -196,7 +196,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
// Make sure you can suggest based only on unigram model:
public void testUnigrams() throws Exception {
Iterable<Input> keys =
-AnalyzingSuggesterTest.shuffle(new Input("foo bar baz blah boo foo bar foo bee", 50));
+TestAnalyzingSuggester.shuffle(new Input("foo bar baz blah boo foo bar foo bee", 50));
Analyzer a = new MockAnalyzer(random());
FreeTextSuggester sug = new FreeTextSuggester(a, a, 1, (byte) 0x20);
@@ -208,7 +208,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
// Make sure the last token is not duplicated
public void testNoDupsAcrossGrams() throws Exception {
-Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar bar bar bar", 50));
+Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo bar bar bar bar", 50));
Analyzer a = new MockAnalyzer(random());
FreeTextSuggester sug = new FreeTextSuggester(a, a, 2, (byte) 0x20);
sug.build(new InputArrayIterator(keys));
@@ -218,7 +218,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
// Lookup of just empty string produces unicode only matches:
public void testEmptyString() throws Exception {
-Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar bar bar bar", 50));
+Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo bar bar bar bar", 50));
Analyzer a = new MockAnalyzer(random());
FreeTextSuggester sug = new FreeTextSuggester(a, a, 2, (byte) 0x20);
sug.build(new InputArrayIterator(keys));
@@ -245,7 +245,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
}
};
-Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("wizard of oz", 50));
+Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("wizard of oz", 50));
FreeTextSuggester sug = new FreeTextSuggester(a, a, 3, (byte) 0x20);
sug.build(new InputArrayIterator(keys));
assertEquals("wizard _ oz/1.00", toString(sug.lookup("wizard of", 10)));
@@ -271,7 +271,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
}
};
-Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("wizard of of oz", 50));
+Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("wizard of of oz", 50));
FreeTextSuggester sug = new FreeTextSuggester(a, a, 3, (byte) 0x20);
sug.build(new InputArrayIterator(keys));
assertEquals("", toString(sug.lookup("wizard of of", 10)));

View File

@@ -50,7 +50,7 @@ import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.FiniteStringsIterator;
import org.apache.lucene.util.fst.Util;
-public class FuzzySuggesterTest extends LuceneTestCase {
+public class TestFuzzySuggester extends LuceneTestCase {
public void testRandomEdits() throws IOException {
List<Input> keys = new ArrayList<>();
@@ -284,7 +284,7 @@ public class FuzzySuggesterTest extends LuceneTestCase {
public void testGraphDups() throws Exception {
final Analyzer analyzer =
-new AnalyzingSuggesterTest.MultiCannedAnalyzer(
+new TestAnalyzingSuggester.MultiCannedAnalyzer(
new CannedTokenStream(
token("wifi", 1, 1),
token("hotspot", 0, 2),
@@ -343,7 +343,7 @@ public class FuzzySuggesterTest extends LuceneTestCase {
// synonym module
final Analyzer analyzer =
-new AnalyzingSuggesterTest.MultiCannedAnalyzer(
+new TestAnalyzingSuggester.MultiCannedAnalyzer(
new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("xc", 1, 1)),
new CannedTokenStream(token("ba", 1, 1), token("xd", 1, 1)),
new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("x", 1, 1)));
@@ -387,7 +387,7 @@ public class FuzzySuggesterTest extends LuceneTestCase {
private Analyzer getUnusualAnalyzer() {
// First three calls just returns "a", then returns ["a","b"], then "a" again
-return new AnalyzingSuggesterTest.MultiCannedAnalyzer(
+return new TestAnalyzingSuggester.MultiCannedAnalyzer(
new CannedTokenStream(token("a", 1, 1)),
new CannedTokenStream(token("a", 1, 1)),
new CannedTokenStream(token("a", 1, 1)),

View File

@@ -26,7 +26,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.OfflineSorter;
import org.junit.Test;
-public class BytesRefSortersTest extends LuceneTestCase {
+public class TestBytesRefSorters extends LuceneTestCase {
@Test
public void testExternalRefSorter() throws Exception {
Directory tempDir = newDirectory();

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.*;
/** Unit tests for {@link FSTCompletion}. */
-public class FSTCompletionTest extends LuceneTestCase {
+public class TestFSTCompletion extends LuceneTestCase {
public static Input tf(String t, int v) {
return new Input(t, v);
@@ -177,7 +177,7 @@ public class FSTCompletionTest extends LuceneTestCase {
@Slow
public void testMultilingualInput() throws Exception {
-List<Input> input = LookupBenchmarkTest.readTop50KWiki();
+List<Input> input = TestLookupBenchmark.readTop50KWiki();
Directory tempDir = getDirectory();
FSTCompletionLookup lookup = new FSTCompletionLookup(tempDir, "fst");

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
-public class WFSTCompletionTest extends LuceneTestCase {
+public class TestWFSTCompletion extends LuceneTestCase {
public void testBasic() throws Exception {
Input keys[] =