LUCENE-10301: make the test-framework a proper module by moving all test

classes to org.apache.lucene.tests.*. Also changes distribution layout
(all modules are now under modules/).
This commit is contained in:
Dawid Weiss 2021-12-21 20:30:45 +01:00
parent 119c7c29ae
commit a94fbb79ac
1668 changed files with 5775 additions and 4334 deletions

View File

@ -588,7 +588,7 @@ def verifyUnpacked(java, artifact, unpackPath, gitRevision, version, testArgs):
if len(in_lucene_folder) > 0:
raise RuntimeError('lucene: unexpected files/dirs in artifact %s lucene/ folder: %s' % (artifact, in_lucene_folder))
else:
is_in_list(in_root_folder, ['bin', 'docs', 'licenses', 'modules', 'modules-test-framework', 'modules-thirdparty'])
is_in_list(in_root_folder, ['bin', 'docs', 'licenses', 'modules', 'modules-thirdparty'])
if len(in_root_folder) > 0:
raise RuntimeError('lucene: unexpected files/dirs in artifact %s: %s' % (artifact, in_root_folder))

View File

@ -226,22 +226,6 @@ configure(project(":lucene:core")) {
}
}
// Fix for Java 11 Javadoc tool that cannot handle split packages between modules correctly.
// (by removing all the packages which are part of lucene-core)
// See: https://issues.apache.org/jira/browse/LUCENE-8738?focusedCommentId=16818106&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-16818106
// LUCENE-9499: This workaround should be applied only to test-framework (we have no split package in other modules).
configure(project(":lucene:test-framework")) {
project.tasks.withType(RenderJavadocTask) {
doLast {
Set luceneCorePackages = file("${project(':lucene:core').tasks[name].outputDir}/element-list").readLines('UTF-8').toSet();
File elementFile = file("${outputDir}/element-list");
List elements = elementFile.readLines('UTF-8');
elements.removeAll(luceneCorePackages)
elementFile.write(elements.join('\n').concat('\n'), 'UTF-8');
}
}
}
configure(project(':lucene:demo')) {
project.tasks.withType(RenderJavadocTask) {
// For the demo, we link the example source in the javadocs, as it's ref'ed elsewhere
@ -415,9 +399,6 @@ class RenderJavadocTask extends DefaultTask {
// - find all (enabled) tasks this tasks depends on (with same name), calling findRenderTasksInDependencies()
// - sort the tasks preferring those whose project name equals 'core', then lexicographical by path
// - for each task get output dir to create relative or absolute link
// NOTE: explicitly exclude solr/test-framework, or attempting to link to lucene-test-framework because if we did javadoc would
attempt to link class refs in org.apache.lucene, causing broken links. (either broken links to things like "Directory" if
// lucene-test-framework was first, or broken links to things like LuceneTestCase if lucene-core was first)
findRenderTasksInDependencies()
.sort(false, Comparator.comparing { (it.project.name != 'core') as Boolean }.thenComparing(Comparator.comparing { it.path }))
.each { otherTask ->

View File

@ -17,6 +17,9 @@
// Configure miscellaneous aspects required for supporting the java module system layer.
// Debugging utilities.
apply from: buildscript.sourceFile.toPath().resolveSibling("modules-debugging.gradle")
allprojects {
plugins.withType(JavaPlugin) {
// We won't be using gradle's built-in automatic module finder.

View File

@ -167,10 +167,10 @@ allprojects {
if (project.path.endsWith(".tests")) {
// LUCENE-10301: for now, do not use the security manager for modular tests (test framework is not available).
} else if (project.path == ":lucene:replicator") {
systemProperty 'java.security.manager', "org.apache.lucene.util.TestSecurityManager"
systemProperty 'java.security.manager', "org.apache.lucene.tests.util.TestSecurityManager"
systemProperty 'java.security.policy', file("${resources}/policies/replicator-tests.policy")
} else if (project.path.startsWith(":lucene")) {
systemProperty 'java.security.manager', "org.apache.lucene.util.TestSecurityManager"
systemProperty 'java.security.manager', "org.apache.lucene.tests.util.TestSecurityManager"
systemProperty 'java.security.policy', file("${resources}/policies/tests.policy")
}

View File

@ -57,10 +57,7 @@ grant {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
// needed by cyberneko usage by benchmarks on J9
permission java.lang.RuntimePermission "accessClassInPackage.org.apache.xerces.util";
// needed by org.apache.logging.log4j
permission java.lang.RuntimePermission "getenv.*";
permission java.lang.RuntimePermission "getClassLoader";
permission java.lang.RuntimePermission "setContextClassLoader";
// Needed for loading native library (lucene:misc:native) in lucene:misc
permission java.lang.RuntimePermission "getFileStoreAttributes";

View File

@ -46,6 +46,10 @@ API Changes
* LUCENE-10197: UnifiedHighlighter now has a Builder to construct it. The UH's setters are now
deprecated. (Animesh Pandey, David Smiley)
* LUCENE-10301: the test framework is now a module. All the classes have been moved from
org.apache.lucene.* to org.apache.lucene.tests.* to avoid package name conflicts with the
core module. (Dawid Weiss)
New Features
---------------------

View File

@ -27,6 +27,13 @@ behind the scenes. It is the responsibility of the caller to call
## Migration from Lucene 9.0 to Lucene 9.1
### Test framework package migration and module (LUCENE-10301)
The test framework is now a module. All the classes have been moved from
`org.apache.lucene.*` to `org.apache.lucene.tests.*` to avoid package name conflicts
with the core module. If you were using the Lucene test framework, the migration should be
fairly automatic (package prefix).
### Minor syntactical changes in StandardQueryParser (LUCENE-10223)
Added interval functions and min-should-match support to `StandardQueryParser`. This

View File

@ -17,8 +17,8 @@
package org.apache.lucene.analysis.ar;
import java.io.IOException;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/** Test the Arabic Analyzer */
public class TestArabicAnalyzer extends BaseTokenStreamTestCase {

View File

@ -18,9 +18,9 @@ package org.apache.lucene.analysis.ar;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the Arabic filter Factories are working. */
public class TestArabicFilters extends BaseTokenStreamFactoryTestCase {

View File

@ -19,10 +19,10 @@ package org.apache.lucene.analysis.ar;
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Test the Arabic Normalization Filter */
public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {

View File

@ -18,12 +18,12 @@ package org.apache.lucene.analysis.ar;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Test the Arabic Normalization Filter */
public class TestArabicStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.bg;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/** Test the Bulgarian analyzer */
public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {

View File

@ -18,9 +18,9 @@ package org.apache.lucene.analysis.bg;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the Bulgarian stem filter factory is working. */
public class TestBulgarianStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -19,12 +19,12 @@ package org.apache.lucene.analysis.bg;
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Test the Bulgarian Stemmer */
public class TestBulgarianStemmer extends BaseTokenStreamTestCase {

View File

@ -17,7 +17,7 @@
package org.apache.lucene.analysis.bn;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/** Tests the BengaliAnalyzer */
public class TestBengaliAnalyzer extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.bn;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Test Bengali Filter Factory */
public class TestBengaliFilters extends BaseTokenStreamFactoryTestCase {

View File

@ -18,11 +18,11 @@ package org.apache.lucene.analysis.bn;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.util.TestUtil;
/** Test BengaliNormalizer */
public class TestBengaliNormalizer extends BaseTokenStreamTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.bn;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/** Test Codes for BengaliStemmer */
public class TestBengaliStemmer extends BaseTokenStreamTestCase {

View File

@ -16,10 +16,10 @@
*/
package org.apache.lucene.analysis.boost;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.search.BoostAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestDelimitedBoostTokenFilter extends BaseTokenStreamTestCase {

View File

@ -19,13 +19,13 @@ package org.apache.lucene.analysis.br;
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/**
* Test the Brazilian Stem Filter, which only modifies the term text.

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.br;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the Brazilian stem filter factory is working. */
public class TestBrazilianStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.ca;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestCatalanAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the stopwords file is missing in classpath */

View File

@ -26,10 +26,10 @@ import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.util.TestUtil;
public class TestHTMLStripCharFilter extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.charfilter;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure this factory is working */
public class TestHTMLStripCharFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -26,12 +26,12 @@ import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.util.UnicodeUtil;
public class TestMappingCharFilter extends BaseTokenStreamTestCase {

View File

@ -16,7 +16,7 @@
*/
package org.apache.lucene.analysis.charfilter;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
public class TestMappingCharFilterFactory extends BaseTokenStreamFactoryTestCase {
public void testParseString() throws Exception {

View File

@ -19,9 +19,7 @@ package org.apache.lucene.analysis.cjk;
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
@ -31,6 +29,8 @@ import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Most tests adopted from TestCJKTokenizer */
public class TestCJKAnalyzer extends BaseTokenStreamTestCase {

View File

@ -18,9 +18,9 @@ package org.apache.lucene.analysis.cjk;
import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.IOUtils;
public class TestCJKBigramFilter extends BaseTokenStreamTestCase {

View File

@ -18,9 +18,9 @@ package org.apache.lucene.analysis.cjk;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the CJK bigram factory is working. */
public class TestCJKBigramFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -21,11 +21,11 @@ import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
public class TestCJKWidthCharFilter extends BaseTokenStreamTestCase {
/** Full-width ASCII forms normalized to half-width (basic latin) */

View File

@ -19,8 +19,8 @@ package org.apache.lucene.analysis.cjk;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure {@link CJKWidthCharFilter} is working */
public class TestCJKWidthCharFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.cjk;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Tests for {@link CJKWidthFilter} */
public class TestCJKWidthFilter extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.cjk;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the CJKWidthFilterFactory is working */
public class TestCJKWidthFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.ckb;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/** Test the Sorani analyzer */
public class TestSoraniAnalyzer extends BaseTokenStreamTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.ckb;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Tests normalization for Sorani (this is more critical than stemming...) */
public class TestSoraniNormalizationFilter extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.ckb;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the Sorani normalization factory is working. */
public class TestSoraniNormalizationFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -16,15 +16,15 @@
*/
package org.apache.lucene.analysis.ckb;
import static org.apache.lucene.analysis.VocabularyAssert.assertVocabulary;
import static org.apache.lucene.tests.analysis.VocabularyAssert.assertVocabulary;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Test the Sorani Stemmer. */
public class TestSoraniStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.ckb;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the Sorani stem factory is working. */
public class TestSoraniStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -19,7 +19,6 @@ package org.apache.lucene.analysis.classic;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
@ -33,6 +32,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.BytesRef;
/** tests for classicanalyzer */

View File

@ -18,9 +18,9 @@ package org.apache.lucene.analysis.classic;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the classic lucene factories are working. */
public class TestClassicFactories extends BaseTokenStreamFactoryTestCase {

View File

@ -19,13 +19,13 @@ package org.apache.lucene.analysis.commongrams;
import java.io.StringReader;
import java.util.Arrays;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Tests CommonGrams(Query)Filter */
public class TestCommonGramsFilter extends BaseTokenStreamTestCase {

View File

@ -17,11 +17,11 @@
package org.apache.lucene.analysis.commongrams;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.util.ClasspathResourceLoader;
import org.apache.lucene.util.ResourceLoader;
import org.apache.lucene.util.Version;

View File

@ -16,11 +16,11 @@
*/
package org.apache.lucene.analysis.commongrams;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.TestStopFilterFactory;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.util.ClasspathResourceLoader;
import org.apache.lucene.util.ResourceLoader;
import org.apache.lucene.util.Version;

View File

@ -21,9 +21,7 @@ import java.io.Reader;
import java.io.StringReader;
import java.util.Arrays;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
@ -32,6 +30,8 @@ import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.compound;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the Dictionary compound filter factory is working. */
public class TestDictionaryCompoundWordTokenFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.compound;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the Hyphenation compound filter factory is working. */
public class TestHyphenationCompoundWordTokenFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -29,24 +29,11 @@ import java.util.Set;
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.CharFilterFactory;
import org.apache.lucene.analysis.CrankyTokenFilter;
import org.apache.lucene.analysis.MockCharFilter;
import org.apache.lucene.analysis.MockFixedLengthPayloadFilter;
import org.apache.lucene.analysis.MockGraphTokenFilter;
import org.apache.lucene.analysis.MockHoleInjectingTokenFilter;
import org.apache.lucene.analysis.MockLowerCaseFilter;
import org.apache.lucene.analysis.MockRandomLookaheadTokenFilter;
import org.apache.lucene.analysis.MockSynonymFilter;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.MockVariableLengthPayloadFilter;
import org.apache.lucene.analysis.SimplePayloadFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenFilterFactory;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.TokenizerFactory;
import org.apache.lucene.analysis.ValidatingTokenFilter;
import org.apache.lucene.analysis.miscellaneous.PatternKeywordMarkerFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
@ -54,7 +41,20 @@ import org.apache.lucene.analysis.sinks.TeeSinkTokenFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.sr.SerbianNormalizationRegularFilter;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.tests.analysis.CrankyTokenFilter;
import org.apache.lucene.tests.analysis.MockCharFilter;
import org.apache.lucene.tests.analysis.MockFixedLengthPayloadFilter;
import org.apache.lucene.tests.analysis.MockGraphTokenFilter;
import org.apache.lucene.tests.analysis.MockHoleInjectingTokenFilter;
import org.apache.lucene.tests.analysis.MockLowerCaseFilter;
import org.apache.lucene.tests.analysis.MockRandomLookaheadTokenFilter;
import org.apache.lucene.tests.analysis.MockSynonymFilter;
import org.apache.lucene.tests.analysis.MockTokenFilter;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.analysis.MockVariableLengthPayloadFilter;
import org.apache.lucene.tests.analysis.SimplePayloadFilter;
import org.apache.lucene.tests.analysis.ValidatingTokenFilter;
import org.apache.lucene.tests.util.LuceneTestCase;
import org.apache.lucene.util.ResourceLoader;
import org.apache.lucene.util.ResourceLoaderAware;
import org.apache.lucene.util.Version;

View File

@ -19,7 +19,6 @@ package org.apache.lucene.analysis.core;
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
@ -28,6 +27,7 @@ import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;

View File

@ -23,12 +23,8 @@ import java.nio.CharBuffer;
import java.util.Arrays;
import java.util.HashSet;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.MockCharFilter;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
@ -40,7 +36,11 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockCharFilter;
import org.apache.lucene.tests.analysis.MockTokenFilter;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs;
@SuppressCodecs("Direct")
public class TestBugInSomething extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.core;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the core lucene factories are working. */
public class TestCoreFactories extends BaseTokenStreamFactoryTestCase {

View File

@ -18,12 +18,12 @@ package org.apache.lucene.analysis.core;
import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.util.SparseFixedBitSet;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.core;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the digit normalization factory is working. */
public class TestDecimalDigitFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -20,16 +20,16 @@ import java.io.Reader;
import java.io.StringReader;
import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockReaderWrapper;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockAnalyzer;
import org.apache.lucene.tests.analysis.MockReaderWrapper;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.Operations;

View File

@ -27,17 +27,17 @@ import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.AbstractAnalysisFactory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharFilterFactory;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilterFactory;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.TokenizerFactory;
import org.apache.lucene.analysis.boost.DelimitedBoostTokenFilterFactory;
import org.apache.lucene.analysis.miscellaneous.DelimitedTermFrequencyTokenFilterFactory;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.ResourceLoaderAware;
import org.apache.lucene.util.Version;

View File

@ -26,17 +26,17 @@ import java.util.Random;
import java.util.stream.Collectors;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.AutomatonToTokenStream;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.TokenStreamToAutomaton;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.CannedTokenStream;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.analysis.Token;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;

View File

@ -18,7 +18,6 @@ package org.apache.lucene.analysis.core;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
@ -33,9 +32,10 @@ import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {

View File

@ -20,8 +20,8 @@ import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.AttributeFactory;
public class TestKeywordTokenizer extends BaseTokenStreamTestCase {

View File

@ -48,18 +48,13 @@ import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.CharArrayMap;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.CrankyTokenFilter;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ValidatingTokenFilter;
import org.apache.lucene.analysis.boost.DelimitedBoostTokenFilter;
import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
import org.apache.lucene.analysis.cjk.CJKBigramFilter;
@ -95,14 +90,19 @@ import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.CrankyTokenFilter;
import org.apache.lucene.tests.analysis.MockTokenFilter;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.analysis.ValidatingTokenFilter;
import org.apache.lucene.tests.util.Rethrow;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.tests.util.automaton.AutomatonTestUtil;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.Rethrow;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.AutomatonTestUtil;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.RegExp;

View File

@ -20,12 +20,12 @@ import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestStopAnalyzer extends BaseTokenStreamTestCase {

View File

@ -16,9 +16,9 @@
*/
package org.apache.lucene.analysis.core;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.util.ClasspathResourceLoader;
import org.apache.lucene.util.ResourceLoader;

View File

@ -20,13 +20,13 @@ import java.io.IOException;
import java.io.StringReader;
import java.util.Collections;
import java.util.Set;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.English;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.util.English;
public class TestTypeTokenFilter extends BaseTokenStreamTestCase {

View File

@ -17,9 +17,9 @@
package org.apache.lucene.analysis.core;
import java.util.Set;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.TokenFilterFactory;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.CannedTokenStream;
/** Testcase for {@link TypeTokenFilterFactory} */
public class TestTypeTokenFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -20,8 +20,8 @@ import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.AttributeFactory;
public class TestUnicodeWhitespaceTokenizer extends BaseTokenStreamTestCase {

View File

@ -19,7 +19,7 @@ package org.apache.lucene.analysis.core;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestWhitespaceAnalyzer extends BaseTokenStreamTestCase {

View File

@ -23,7 +23,6 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.CharFilterFactory;
import org.apache.lucene.analysis.LowerCaseFilter;
@ -41,6 +40,7 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilterFactory;
import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
import org.apache.lucene.analysis.standard.StandardTokenizerFactory;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SetOnce.AlreadySetException;

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.cz;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/**
* Test the CzechAnalyzer

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.cz;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the Czech stem filter factory is working. */
public class TestCzechStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -19,12 +19,12 @@ package org.apache.lucene.analysis.cz;
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/**
* Test the Czech Stemmer.

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.da;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestDanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the stopwords file is missing in classpath */

View File

@ -19,12 +19,12 @@ package org.apache.lucene.analysis.de;
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestGermanAnalyzer extends BaseTokenStreamTestCase {
public void testReusableTokenStream() throws Exception {

View File

@ -16,17 +16,17 @@
*/
package org.apache.lucene.analysis.de;
import static org.apache.lucene.analysis.VocabularyAssert.*;
import static org.apache.lucene.tests.analysis.VocabularyAssert.*;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests for {@link GermanLightStemFilter} */
public class TestGermanLightStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.de;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the German light stem factory is working. */
public class TestGermanLightStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -16,17 +16,17 @@
*/
package org.apache.lucene.analysis.de;
import static org.apache.lucene.analysis.VocabularyAssert.*;
import static org.apache.lucene.tests.analysis.VocabularyAssert.*;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests for {@link GermanMinimalStemFilter} */
public class TestGermanMinimalStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.de;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the German minimal stem factory is working. */
public class TestGermanMinimalStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,11 +18,11 @@ package org.apache.lucene.analysis.de;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Tests {@link GermanNormalizationFilter} */
public class TestGermanNormalizationFilter extends BaseTokenStreamTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.de;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the German normalization factory is working. */
public class TestGermanNormalizationFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -16,19 +16,19 @@
*/
package org.apache.lucene.analysis.de;
import static org.apache.lucene.analysis.VocabularyAssert.*;
import static org.apache.lucene.tests.analysis.VocabularyAssert.*;
import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/**
* Test the German stemmer. The stemming algorithm is known to work less than perfect, as it doesn't

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.de;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the German stem filter factory is working. */
public class TestGermanStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -17,7 +17,7 @@
package org.apache.lucene.analysis.el;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
/** A unit test class for verifying the correct operation of the GreekAnalyzer. */
public class TestGreekAnalyzer extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.el;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the Greek lowercase filter factory is working. */
public class TestGreekLowerCaseFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.el;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the Greek stem filter factory is working. */
public class TestGreekStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,9 +18,9 @@ package org.apache.lucene.analysis.el;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestGreekStemmer extends BaseTokenStreamTestCase {
private Analyzer a;

View File

@ -19,7 +19,7 @@ package org.apache.lucene.analysis.email;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestUAX29URLEmailAnalyzer extends BaseTokenStreamTestCase {

View File

@ -30,15 +30,15 @@ import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.EmojiTokenizationTestUnicode_12_1;
import org.apache.lucene.analysis.standard.WordBreakTestUnicode_12_1_0;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.standard.EmojiTokenizationTestUnicode_12_1;
import org.apache.lucene.tests.analysis.standard.WordBreakTestUnicode_12_1_0;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.email;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** A few tests based on org.apache.lucene.analysis.TestUAX29URLEmailTokenizer */
public class TestUAX29URLEmailTokenizerFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.en;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestEnglishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the stopwords file is missing in classpath */

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.en;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests for {@link EnglishMinimalStemFilter} */
public class TestEnglishMinimalStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.en;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the English minimal stem factory is working. */
public class TestEnglishMinimalStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.en;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the kstem filter factory is working. */
public class TestKStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -16,14 +16,14 @@
*/
package org.apache.lucene.analysis.en;
import static org.apache.lucene.analysis.VocabularyAssert.assertVocabulary;
import static org.apache.lucene.tests.analysis.VocabularyAssert.assertVocabulary;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.junit.Ignore;
/** Tests for {@link KStemmer} */

View File

@ -16,18 +16,18 @@
*/
package org.apache.lucene.analysis.en;
import static org.apache.lucene.analysis.VocabularyAssert.*;
import static org.apache.lucene.tests.analysis.VocabularyAssert.*;
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Test the PorterStemFilter with Martin Porter's test data. */
public class TestPorterStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.en;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
/** Simple tests to ensure the Porter stem filter factory is working. */
public class TestPorterStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,8 +18,8 @@ package org.apache.lucene.analysis.es;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestSpanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the stopwords file is missing in classpath */

View File

@ -16,14 +16,14 @@
*/
package org.apache.lucene.analysis.es;
import static org.apache.lucene.analysis.VocabularyAssert.*;
import static org.apache.lucene.tests.analysis.VocabularyAssert.*;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests for {@link SpanishLightStemFilter} */
public class TestSpanishLightStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.es;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the Spanish Light stem factory is working. */
public class TestSpanishLightStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -19,11 +19,11 @@ package org.apache.lucene.analysis.es;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.en.EnglishMinimalStemFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/**
* Simple tests for {@link SpanishMinimalStemFilter}

View File

@ -19,10 +19,10 @@ package org.apache.lucene.analysis.es;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/**
* Simple tests to ensure the spanish minimal stem factory is working.

View File

@ -16,14 +16,14 @@
*/
package org.apache.lucene.analysis.es;
import static org.apache.lucene.analysis.VocabularyAssert.assertVocabulary;
import static org.apache.lucene.tests.analysis.VocabularyAssert.assertVocabulary;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests for {@link SpanishPluralStemFilter} */
public class TestSpanishPluralStemFilter extends BaseTokenStreamTestCase {

View File

@ -18,10 +18,10 @@ package org.apache.lucene.analysis.es;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
/** Simple tests to ensure the Spanish Plural stem factory is working. */
public class TestSpanishPluralStemFilterFactory extends BaseTokenStreamFactoryTestCase {

View File

@ -18,7 +18,7 @@ package org.apache.lucene.analysis.et;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
public class TestEstonianAnalyzer extends BaseTokenStreamTestCase {

Some files were not shown because too many files have changed in this diff Show More