Remove the remaining deprecated ctors from the TokenStream API test base class (BaseTokenStreamTestCase). They were used to test the old and new TokenStream API.

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@829244 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Uwe Schindler 2009-10-23 21:21:17 +00:00
parent 3b50c11e42
commit 7902c4b729
6 changed files with 10 additions and 29 deletions

View File

@@ -38,8 +38,7 @@ import org.apache.lucene.analysis.tokenattributes.*;
public class TestShingleMatrixFilter extends BaseTokenStreamTestCase { public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
public TestShingleMatrixFilter(String name) { public TestShingleMatrixFilter(String name) {
// use this ctor, because SingleTokenTokenStream only uses next(Token), so exclude it super(name);
super(name, new HashSet<String>(Arrays.asList("testBehavingAsShingleFilter", "testMatrix", "testIterator")));
} }
public void testIterator() throws IOException { public void testIterator() throws IOException {

View File

@@ -26,9 +26,6 @@ import org.apache.lucene.util.LuceneTestCase;
/** /**
* Base class for all Lucene unit tests that use TokenStreams. * Base class for all Lucene unit tests that use TokenStreams.
* <p>
* This class runs all tests twice, one time with {@link TokenStream#setOnlyUseNewAPI} <code>false</code>
* and after that one time with <code>true</code>.
*/ */
public abstract class BaseTokenStreamTestCase extends LuceneTestCase { public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
@ -40,16 +37,6 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
super(name); super(name);
} }
/** @deprecated */
public BaseTokenStreamTestCase(Set testWithNewAPI) {
super();
}
/** @deprecated */
public BaseTokenStreamTestCase(String name, Set testWithNewAPI) {
super(name);
}
// some helpers to test Analyzers and TokenStreams: // some helpers to test Analyzers and TokenStreams:
public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException { public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {

View File

@@ -38,10 +38,10 @@ import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil; import org.apache.lucene.util._TestUtil;
public class TestDocumentWriter extends BaseTokenStreamTestCase { public class TestDocumentWriter extends LuceneTestCase {
private RAMDirectory dir; private RAMDirectory dir;
public TestDocumentWriter(String s) { public TestDocumentWriter(String s) {

View File

@@ -32,7 +32,7 @@ import java.util.Map;
import java.util.HashSet; import java.util.HashSet;
import java.util.Random; import java.util.Random;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CachingTokenFilter; import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -68,14 +68,9 @@ import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil; import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.Version; import org.apache.lucene.util.Version;
public class TestIndexWriter extends BaseTokenStreamTestCase { public class TestIndexWriter extends LuceneTestCase {
public TestIndexWriter(String name) { public TestIndexWriter(String name) {
super(name, new HashSet(Arrays.asList(new String[]{ super(name);
"testExceptionFromTokenStream", "testDocumentsWriterExceptions", "testNegativePositions",
"testEndOffsetPositionWithCachingTokenFilter", "testEndOffsetPositionWithTeeSinkTokenFilter",
"testEndOffsetPositionStandard", "testEndOffsetPositionStandardEmptyField",
"testEndOffsetPositionStandardEmptyField2"
})));
} }
public void testDocCount() throws IOException public void testDocCount() throws IOException

View File

@@ -35,13 +35,13 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version; import org.apache.lucene.util.Version;
/** /**
* Tests QueryParser. * Tests QueryParser.
*/ */
public class TestMultiFieldQueryParser extends BaseTokenStreamTestCase { public class TestMultiFieldQueryParser extends LuceneTestCase {
/** test stop words parsing for both the non static form, and for the /** test stop words parsing for both the non static form, and for the
* corresponding static form (qtxt, fields[]). */ * corresponding static form (qtxt, fields[]). */

View File

@@ -42,7 +42,6 @@ import org.apache.lucene.index.TermPositions;
import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.LowerCaseTokenizer; import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.index.Payload; import org.apache.lucene.index.Payload;
@@ -52,6 +51,7 @@ import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans; import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.Version; import org.apache.lucene.util.Version;
import org.apache.lucene.util.LuceneTestCase;
/** /**
* Term position unit test. * Term position unit test.
@@ -59,7 +59,7 @@ import org.apache.lucene.util.Version;
* *
* @version $Revision$ * @version $Revision$
*/ */
public class TestPositionIncrement extends BaseTokenStreamTestCase { public class TestPositionIncrement extends LuceneTestCase {
public void testSetPosition() throws Exception { public void testSetPosition() throws Exception {
Analyzer analyzer = new Analyzer() { Analyzer analyzer = new Analyzer() {