diff --git a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java index 4b74f0ba4fb..7fa87f34543 100644 --- a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java +++ b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java @@ -156,8 +156,8 @@ public class QueryScorer implements Scorer { */ public TokenStream init(TokenStream tokenStream) throws IOException { position = -1; - termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class); - posIncAtt = (PositionIncrementAttribute) tokenStream.getAttribute(PositionIncrementAttribute.class); + termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class); + posIncAtt = (PositionIncrementAttribute) tokenStream.addAttribute(PositionIncrementAttribute.class); if(!skipInitExtractor) { if(fieldWeightedSpanTerms != null) { fieldWeightedSpanTerms.clear(); diff --git a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java index 632cb301f47..be992aced3a 100644 --- a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java +++ b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java @@ -95,7 +95,7 @@ public class QueryTermScorer implements Scorer { * @see org.apache.lucene.search.highlight.Scorer#init(org.apache.lucene.analysis.TokenStream) */ public TokenStream init(TokenStream tokenStream) { - termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class); + termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class); return null; } diff --git a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleFragmenter.java b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleFragmenter.java index f5a6d8bf242..285cb7984c3 100644 --- a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleFragmenter.java +++ b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleFragmenter.java @@ -47,7 +47,7 @@ public class SimpleFragmenter implements Fragmenter { * @see org.apache.lucene.search.highlight.Fragmenter#start(java.lang.String, org.apache.lucene.analysis.TokenStream) */ public void start(String originalText, TokenStream stream) { - offsetAtt = (OffsetAttribute) stream.getAttribute(OffsetAttribute.class); + offsetAtt = (OffsetAttribute) stream.addAttribute(OffsetAttribute.class); currentNumFrags = 1; } diff --git a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java index 46598d391ea..05feb2ee0c3 100644 --- a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java +++ b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java @@ -101,8 +101,8 @@ public class SimpleSpanFragmenter implements Fragmenter { position = -1; currentNumFrags = 1; textSize = originalText.length(); - termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class); - posIncAtt = (PositionIncrementAttribute) tokenStream.getAttribute(PositionIncrementAttribute.class); - offsetAtt = (OffsetAttribute) tokenStream.getAttribute(OffsetAttribute.class); + termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class); + posIncAtt = 
(PositionIncrementAttribute) tokenStream.addAttribute(PositionIncrementAttribute.class); + offsetAtt = (OffsetAttribute) tokenStream.addAttribute(OffsetAttribute.class); } } diff --git a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java index 03dc523f002..5843deb6063 100644 --- a/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java +++ b/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java @@ -41,8 +41,8 @@ public class TokenGroup { private TermAttribute termAtt; public TokenGroup(TokenStream tokenStream) { - offsetAtt = (OffsetAttribute) tokenStream.getAttribute(OffsetAttribute.class); - termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class); + offsetAtt = (OffsetAttribute) tokenStream.addAttribute(OffsetAttribute.class); + termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class); } void addToken(float score) { diff --git a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java index e59b1eec16d..2f3aace40df 100644 --- a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java +++ b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java @@ -33,8 +33,6 @@ import java.util.StringTokenizer; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; -import junit.framework.TestCase; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseTokenizer; import org.apache.lucene.analysis.SimpleAnalyzer; @@ -78,6 +76,7 @@ import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.Version; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.w3c.dom.Element; import org.w3c.dom.NodeList; @@ -85,7 +84,7 @@ import org.w3c.dom.NodeList; * JUnit Test for Highlighter class. 
* */ -public class HighlighterTest extends TestCase implements Formatter { +public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter { private IndexReader reader; static final String FIELD_NAME = "contents"; private Query query; @@ -1600,10 +1599,8 @@ public class HighlighterTest extends TestCase implements Formatter { } } - /* - * @see TestCase#setUp() - */ protected void setUp() throws Exception { + super.setUp(); ramDir = new RAMDirectory(); IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(), true); for (int i = 0; i < texts.length; i++) { @@ -1624,9 +1621,6 @@ public class HighlighterTest extends TestCase implements Formatter { } - /* - * @see TestCase#tearDown() - */ protected void tearDown() throws Exception { super.tearDown(); } @@ -1692,9 +1686,9 @@ class SynonymTokenizer extends TokenStream { public SynonymTokenizer(TokenStream realStream, Map synonyms) { this.realStream = realStream; this.synonyms = synonyms; - realTermAtt = (TermAttribute) realStream.getAttribute(TermAttribute.class); - realPosIncrAtt = (PositionIncrementAttribute) realStream.getAttribute(PositionIncrementAttribute.class); - realOffsetAtt = (OffsetAttribute) realStream.getAttribute(OffsetAttribute.class); + realTermAtt = (TermAttribute) realStream.addAttribute(TermAttribute.class); + realPosIncrAtt = (PositionIncrementAttribute) realStream.addAttribute(PositionIncrementAttribute.class); + realOffsetAtt = (OffsetAttribute) realStream.addAttribute(OffsetAttribute.class); termAtt = (TermAttribute) addAttribute(TermAttribute.class); posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class); diff --git a/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java b/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java index 139e2a74129..500c2e5f562 100644 --- a/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java +++ b/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java @@ -33,8 +33,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Set; -import junit.framework.TestCase; - +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.analysis.StopAnalyzer; @@ -198,7 +197,7 @@ the^3 */ -public class MemoryIndexTest extends TestCase { +public class MemoryIndexTest extends BaseTokenStreamTestCase { private Analyzer analyzer; private boolean fastMode = false; @@ -214,7 +213,8 @@ public class MemoryIndexTest extends TestCase { /* all files will be open relative to this */ public String fileDir; - public void setUp() { + protected void setUp() throws Exception { + super.setUp(); fileDir = System.getProperty("lucene.common.dir", null); } diff --git a/contrib/memory/src/test/org/apache/lucene/index/memory/PatternAnalyzerTest.java b/contrib/memory/src/test/org/apache/lucene/index/memory/PatternAnalyzerTest.java index 009fbb7ca6c..ff0d1ae25dc 100644 --- a/contrib/memory/src/test/org/apache/lucene/index/memory/PatternAnalyzerTest.java +++ b/contrib/memory/src/test/org/apache/lucene/index/memory/PatternAnalyzerTest.java @@ -31,8 +31,7 @@ import java.util.List; import java.util.Set; import java.util.regex.Pattern; -import junit.framework.TestCase; - +import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.LetterTokenizer; import org.apache.lucene.analysis.LowerCaseFilter; import 
org.apache.lucene.analysis.StopAnalyzer; @@ -58,8 +57,9 @@ Lucene features: CharTokenizer.MAX_WORD_LEN = 255. Thus the PatternAnalyzer produces correct output, whereas the WhitespaceAnalyzer silently truncates text, and so the comparison results in assertEquals() don't match up. +TODO: Convert to new TokenStream API! */ -public class PatternAnalyzerTest extends TestCase { +public class PatternAnalyzerTest extends LuceneTestCase { /** Runs the tests and/or benchmark */ public static void main(String[] args) throws Throwable { diff --git a/contrib/memory/src/test/org/apache/lucene/index/memory/TestSynonymTokenFilter.java b/contrib/memory/src/test/org/apache/lucene/index/memory/TestSynonymTokenFilter.java index efbb413bf26..30056565170 100644 --- a/contrib/memory/src/test/org/apache/lucene/index/memory/TestSynonymTokenFilter.java +++ b/contrib/memory/src/test/org/apache/lucene/index/memory/TestSynonymTokenFilter.java @@ -31,10 +31,9 @@ import org.apache.lucene.analysis.WhitespaceTokenizer; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; -import junit.framework.TestCase; - -public class TestSynonymTokenFilter extends TestCase { +public class TestSynonymTokenFilter extends BaseTokenStreamTestCase { File dataDir = new File(System.getProperty("dataDir", "./bin")); File testFile = new File(dataDir, "org/apache/lucene/index/memory/testSynonyms.txt"); diff --git a/src/java/org/apache/lucene/index/TermsHashPerField.java b/src/java/org/apache/lucene/index/TermsHashPerField.java index 27b550cd6c3..1c49d4facb7 100644 --- a/src/java/org/apache/lucene/index/TermsHashPerField.java +++ b/src/java/org/apache/lucene/index/TermsHashPerField.java @@ -249,7 +249,7 @@ final class TermsHashPerField extends InvertedDocConsumerPerField { private boolean doNextCall; void start(Fieldable f) { - termAtt = (TermAttribute) fieldState.attributeSource.getAttribute(TermAttribute.class); + termAtt = (TermAttribute) fieldState.attributeSource.addAttribute(TermAttribute.class); consumer.start(f); if (nextPerField != null) { nextPerField.start(f); diff --git a/src/java/org/apache/lucene/search/QueryTermVector.java b/src/java/org/apache/lucene/search/QueryTermVector.java index 5e74dd84f43..ef92f945582 100644 --- a/src/java/org/apache/lucene/search/QueryTermVector.java +++ b/src/java/org/apache/lucene/search/QueryTermVector.java @@ -61,7 +61,7 @@ public class QueryTermVector implements TermFreqVector { boolean hasMoreTokens = false; stream.reset(); - TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class); + TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class); hasMoreTokens = stream.incrementToken(); while (hasMoreTokens) { diff --git a/src/java/org/apache/lucene/util/AttributeSource.java b/src/java/org/apache/lucene/util/AttributeSource.java index 7a29b7e0419..a587555a145 100644 --- a/src/java/org/apache/lucene/util/AttributeSource.java +++ b/src/java/org/apache/lucene/util/AttributeSource.java @@ -249,7 +249,11 @@ public class AttributeSource { *

 * Signature for Java 1.5: public <T extends Attribute> T getAttribute(Class<T>)
 *
 * @throws IllegalArgumentException if this AttributeSource does not contain the
- *   Attribute
+ *   Attribute. It is recommended to always use {@link #addAttribute} even in consumers
+ *   of TokenStreams, because you cannot know if a specific TokenStream really uses
+ *   a specific Attribute. {@link #addAttribute} will automatically make the attribute
+ *   available. If you want to only use the attribute, if it is available (to optimize
+ *   consuming), use {@link #hasAttribute}.
   */
  public Attribute getAttribute(Class attClass) {
    final Attribute att = (Attribute) this.attributes.get(attClass);
diff --git a/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
new file mode 100644
index 00000000000..e814e60a8ba
--- /dev/null
+++ b/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
@@ -0,0 +1,84 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Set;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Base class for all Lucene unit tests that use TokenStreams.
+ *

+ * This class runs all tests twice, one time with {@link TokenStream#setOnlyUseNewAPI} false
+ * and after that one time with true.
+ */
+public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
+
+  private boolean onlyUseNewAPI = false;
+  private final Set testWithNewAPI;
+
+  public BaseTokenStreamTestCase() {
+    super();
+    this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI
+  }
+
+  public BaseTokenStreamTestCase(String name) {
+    super(name);
+    this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI
+  }
+
+  public BaseTokenStreamTestCase(Set testWithNewAPI) {
+    super();
+    this.testWithNewAPI = testWithNewAPI;
+  }
+
+  public BaseTokenStreamTestCase(String name, Set testWithNewAPI) {
+    super(name);
+    this.testWithNewAPI = testWithNewAPI;
+  }
+
+  // @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    TokenStream.setOnlyUseNewAPI(onlyUseNewAPI);
+  }
+
+  // @Override
+  public void runBare() throws Throwable {
+    // Do the test with onlyUseNewAPI=false (default)
+    try {
+      onlyUseNewAPI = false;
+      super.runBare();
+    } catch (Throwable e) {
+      System.out.println("Test failure of "+getName()+" occurred with onlyUseNewAPI=false");
+      throw e;
+    }
+
+    if (testWithNewAPI == null || testWithNewAPI.contains(getName())) {
+      // Do the test again with onlyUseNewAPI=true
+      try {
+        onlyUseNewAPI = true;
+        super.runBare();
+      } catch (Throwable e) {
+        System.out.println("Test failure of "+getName()+" occurred with onlyUseNewAPI=true");
+        throw e;
+      }
+    }
+  }
+
+}
diff --git a/src/test/org/apache/lucene/analysis/BaseTokenTestCase.java b/src/test/org/apache/lucene/analysis/BaseTokenTestCase.java
index 1259d6138d7..816d7615276 100644
--- a/src/test/org/apache/lucene/analysis/BaseTokenTestCase.java
+++ b/src/test/org/apache/lucene/analysis/BaseTokenTestCase.java
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.lucene.util.LuceneTestCase;
+/* TODO: Convert to new TokenStream API. Token instances must be removed for that to work */
 public abstract class BaseTokenTestCase extends LuceneTestCase {
   public static String tsToString(TokenStream in) throws IOException {
     StringBuffer out = new StringBuffer();
diff --git a/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java b/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
index d2c949f6533..2f16d964216 100644
--- a/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
@@ -18,14 +18,13 @@ package org.apache.lucene.analysis;
 */
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.LuceneTestCase;
 import java.io.StringReader;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Iterator;
-public class TestASCIIFoldingFilter extends LuceneTestCase {
+public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
 // testLain1Accents() is a copy of TestLatin1AccentFilter.testU().
public void testLatin1Accents() throws Exception { diff --git a/src/test/org/apache/lucene/analysis/TestAnalyzers.java b/src/test/org/apache/lucene/analysis/TestAnalyzers.java index 7b0107a6e07..c43d3bfe502 100644 --- a/src/test/org/apache/lucene/analysis/TestAnalyzers.java +++ b/src/test/org/apache/lucene/analysis/TestAnalyzers.java @@ -26,9 +26,8 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.index.Payload; -import org.apache.lucene.util.LuceneTestCase; -public class TestAnalyzers extends LuceneTestCase { +public class TestAnalyzers extends BaseTokenStreamTestCase { public TestAnalyzers(String name) { super(name); diff --git a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java index bd8589ba430..22aa3827429 100644 --- a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java +++ b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java @@ -20,8 +20,6 @@ package org.apache.lucene.analysis; import java.io.IOException; -import org.apache.lucene.util.LuceneTestCase; - import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.document.Document; @@ -34,7 +32,7 @@ import org.apache.lucene.index.TermPositions; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -public class TestCachingTokenFilter extends LuceneTestCase { +public class TestCachingTokenFilter extends BaseTokenStreamTestCase { private String[] tokens = new String[] {"term1", "term2", "term3", "term2"}; public void testCaching() throws IOException { diff --git a/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java b/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java index ae0147fad6f..8a6b7c63dd0 100644 --- a/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java +++ b/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java @@ -18,11 +18,10 @@ package org.apache.lucene.analysis; */ import org.apache.lucene.analysis.tokenattributes.TermAttribute; -import org.apache.lucene.util.LuceneTestCase; import java.io.StringReader; -public class TestISOLatin1AccentFilter extends LuceneTestCase { +public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase { public void testU() throws Exception { TokenStream stream = new WhitespaceTokenizer(new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl")); ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream); diff --git a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java index bca62723a51..e19ab4f1a5f 100644 --- a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java +++ b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java @@ -31,9 +31,8 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.LuceneTestCase; -public class TestKeywordAnalyzer extends LuceneTestCase { +public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { private RAMDirectory directory; 
private IndexSearcher searcher; diff --git a/src/test/org/apache/lucene/analysis/TestLengthFilter.java b/src/test/org/apache/lucene/analysis/TestLengthFilter.java index c44d20afd27..eb0c0b2102b 100644 --- a/src/test/org/apache/lucene/analysis/TestLengthFilter.java +++ b/src/test/org/apache/lucene/analysis/TestLengthFilter.java @@ -18,11 +18,10 @@ package org.apache.lucene.analysis; */ import org.apache.lucene.analysis.tokenattributes.TermAttribute; -import org.apache.lucene.util.LuceneTestCase; import java.io.StringReader; -public class TestLengthFilter extends LuceneTestCase { +public class TestLengthFilter extends BaseTokenStreamTestCase { public void testFilter() throws Exception { TokenStream stream = new WhitespaceTokenizer( diff --git a/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java b/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java index 37abbb26529..a6d2c984001 100644 --- a/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java +++ b/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java @@ -17,12 +17,11 @@ package org.apache.lucene.analysis; * limitations under the License. */ -import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.analysis.tokenattributes.TypeAttribute; -public class TestNumericTokenStream extends LuceneTestCase { +public class TestNumericTokenStream extends BaseTokenStreamTestCase { static final long lvalue = 4573245871874382L; static final int ivalue = 123456; diff --git a/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java b/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java index 1a037aacba3..92f3fba5d1a 100644 --- a/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java +++ b/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java @@ -3,7 +3,6 @@ package org.apache.lucene.analysis; import java.io.StringReader; import org.apache.lucene.analysis.tokenattributes.TermAttribute; -import org.apache.lucene.util.LuceneTestCase; /** * Licensed to the Apache Software Foundation (ASF) under one or more @@ -22,7 +21,7 @@ import org.apache.lucene.util.LuceneTestCase; * limitations under the License. */ -public class TestPerFieldAnalzyerWrapper extends LuceneTestCase { +public class TestPerFieldAnalzyerWrapper extends BaseTokenStreamTestCase { public void testPerField() throws Exception { String text = "Qwerty"; PerFieldAnalyzerWrapper analyzer = diff --git a/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java b/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java index 43c24bac1f2..c0f23964eab 100644 --- a/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java +++ b/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java @@ -5,7 +5,6 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.analysis.tokenattributes.TypeAttribute; -import org.apache.lucene.util.LuceneTestCase; import java.io.StringReader; @@ -25,7 +24,7 @@ import java.io.StringReader; * limitations under the License. 
*/ -public class TestStandardAnalyzer extends LuceneTestCase { +public class TestStandardAnalyzer extends BaseTokenStreamTestCase { private Analyzer a = new StandardAnalyzer(); diff --git a/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java b/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java index c9e8bf2e2e8..2677eb2ac68 100644 --- a/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java +++ b/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java @@ -19,7 +19,6 @@ package org.apache.lucene.analysis; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; -import org.apache.lucene.util.LuceneTestCase; import java.io.StringReader; import java.io.IOException; @@ -27,7 +26,7 @@ import java.util.Iterator; import java.util.Set; import java.util.HashSet; -public class TestStopAnalyzer extends LuceneTestCase { +public class TestStopAnalyzer extends BaseTokenStreamTestCase { private StopAnalyzer stop = new StopAnalyzer(false); private Set inValidTokens = new HashSet(); diff --git a/src/test/org/apache/lucene/analysis/TestStopFilter.java b/src/test/org/apache/lucene/analysis/TestStopFilter.java index b096dd26eb4..98675f48c13 100644 --- a/src/test/org/apache/lucene/analysis/TestStopFilter.java +++ b/src/test/org/apache/lucene/analysis/TestStopFilter.java @@ -19,7 +19,6 @@ package org.apache.lucene.analysis; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.util.English; -import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; import java.io.StringReader; @@ -27,7 +26,7 @@ import java.util.ArrayList; import java.util.Set; -public class TestStopFilter extends LuceneTestCase { +public class TestStopFilter extends BaseTokenStreamTestCase { private final static boolean VERBOSE = false; diff --git a/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java b/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java index f9b0485dcfa..5d6a4e10842 100644 --- a/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java +++ b/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java @@ -22,7 +22,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.English; -import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; import java.io.StringReader; @@ -32,7 +31,7 @@ import java.util.List; /** * tests for the TestTeeSinkTokenFilter */ -public class TestTeeSinkTokenFilter extends LuceneTestCase { +public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { protected StringBuffer buffer1; protected StringBuffer buffer2; protected String[] tokens1; diff --git a/src/test/org/apache/lucene/index/TestDocumentWriter.java b/src/test/org/apache/lucene/index/TestDocumentWriter.java index 6eb8aa7f267..394f126f213 100644 --- a/src/test/org/apache/lucene/index/TestDocumentWriter.java +++ b/src/test/org/apache/lucene/index/TestDocumentWriter.java @@ -38,10 +38,10 @@ import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.AttributeSource; -import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import 
org.apache.lucene.util._TestUtil; -public class TestDocumentWriter extends LuceneTestCase { +public class TestDocumentWriter extends BaseTokenStreamTestCase { private RAMDirectory dir; public TestDocumentWriter(String s) { diff --git a/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java b/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java index 528a2c65d2f..a48721c36e4 100644 --- a/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java +++ b/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java @@ -30,7 +30,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.analysis.tokenattributes.TypeAttribute; import org.apache.lucene.search.Query; -import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; /** * Test QueryParser's ability to deal with Analyzers that return more @@ -38,7 +38,7 @@ import org.apache.lucene.util.LuceneTestCase; * increment > 1. * */ -public class TestMultiAnalyzer extends LuceneTestCase { +public class TestMultiAnalyzer extends BaseTokenStreamTestCase { private static int multiToken = 0; diff --git a/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java b/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java index 80d04f2d696..fd36564269e 100644 --- a/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java +++ b/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java @@ -35,12 +35,12 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; /** * Tests QueryParser. */ -public class TestMultiFieldQueryParser extends LuceneTestCase { +public class TestMultiFieldQueryParser extends BaseTokenStreamTestCase { /** test stop words arsing for both the non static form, and for the * corresponding static form (qtxt, fields[]). */ diff --git a/src/test/org/apache/lucene/queryParser/TestQueryParser.java b/src/test/org/apache/lucene/queryParser/TestQueryParser.java index 30ce3d38d9c..0be650e4a99 100644 --- a/src/test/org/apache/lucene/queryParser/TestQueryParser.java +++ b/src/test/org/apache/lucene/queryParser/TestQueryParser.java @@ -58,12 +58,12 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; /** * Tests QueryParser. 
*/ -public class TestQueryParser extends LuceneTestCase { +public class TestQueryParser extends BaseTokenStreamTestCase { public static Analyzer qpAnalyzer = new QPTestAnalyzer(); diff --git a/src/test/org/apache/lucene/search/TestPositionIncrement.java b/src/test/org/apache/lucene/search/TestPositionIncrement.java index 4056c2dfba5..df2ed36c88f 100644 --- a/src/test/org/apache/lucene/search/TestPositionIncrement.java +++ b/src/test/org/apache/lucene/search/TestPositionIncrement.java @@ -40,7 +40,7 @@ import org.apache.lucene.index.TermPositions; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.LowerCaseTokenizer; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.index.Payload; @@ -56,7 +56,7 @@ import org.apache.lucene.search.spans.Spans; * * @version $Revision$ */ -public class TestPositionIncrement extends LuceneTestCase { +public class TestPositionIncrement extends BaseTokenStreamTestCase { public void testSetPosition() throws Exception { Analyzer analyzer = new Analyzer() { diff --git a/src/test/org/apache/lucene/util/LuceneTestCase.java b/src/test/org/apache/lucene/util/LuceneTestCase.java index 724d84685d1..28430b4bdda 100644 --- a/src/test/org/apache/lucene/util/LuceneTestCase.java +++ b/src/test/org/apache/lucene/util/LuceneTestCase.java @@ -24,6 +24,7 @@ import java.util.Random; import junit.framework.TestCase; +import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.search.FieldCache; import org.apache.lucene.search.FieldCache.CacheEntry; @@ -58,7 +59,9 @@ public abstract class LuceneTestCase extends TestCase { } protected void setUp() throws Exception { + super.setUp(); ConcurrentMergeScheduler.setTestMode(); + TokenStream.setOnlyUseNewAPI(false); } /** @@ -96,6 +99,7 @@ public abstract class LuceneTestCase extends TestCase { } finally { purgeFieldCache(FieldCache.DEFAULT); } + super.tearDown(); } /**
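
The AttributeSource.getAttribute() javadoc change above spells out the pattern this patch applies throughout: consumers should obtain attributes with addAttribute(), which registers the attribute if the producer did not, rather than getAttribute(), which throws IllegalArgumentException when the attribute is missing; hasAttribute() can guard attributes that are purely optional. A minimal consumer sketch of that pattern against the 2.9-era attribute API follows (the WhitespaceTokenizer input, class name, and printed output are illustrative assumptions, not part of this patch):

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class AttributeConsumerSketch {
  public static void main(String[] args) throws IOException {
    TokenStream stream = new WhitespaceTokenizer(new StringReader("Some sample text"));

    // addAttribute() never throws: it returns the stream's existing instance or
    // registers a new one, so the consumer works even if the producer did not
    // declare the attribute itself.
    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
    PositionIncrementAttribute posIncAtt =
        (PositionIncrementAttribute) stream.addAttribute(PositionIncrementAttribute.class);

    // Purely optional attribute: only consume it if the producer provides it.
    OffsetAttribute offsetAtt = stream.hasAttribute(OffsetAttribute.class)
        ? (OffsetAttribute) stream.getAttribute(OffsetAttribute.class)
        : null;

    while (stream.incrementToken()) {
      System.out.print(termAtt.term() + " (posIncr=" + posIncAtt.getPositionIncrement() + ")");
      if (offsetAtt != null) {
        System.out.print(" [" + offsetAtt.startOffset() + "-" + offsetAtt.endOffset() + "]");
      }
      System.out.println();
    }
    stream.close();
  }
}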