LUCENE-1825: Incorrect usage of AttributeSource.addAttribute/getAttribute leads to failures when onlyUseNewAPI=true

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@806844 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2009-08-22 12:04:35 +00:00
parent f409117a6f
commit 23d976abbd
32 changed files with 142 additions and 69 deletions
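For context, this is the consumer pattern the commit standardizes on: look up attributes with addAttribute rather than getAttribute, so the lookup also works when a TokenStream only uses the new attribute API. The sketch below is not part of the commit; the class name and input text are made up, and it is written against the 2.9-era non-generic attribute API used throughout this diff.

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

// Hypothetical consumer sketch, not code from this commit.
public class AddAttributeConsumerSketch {
  public static void main(String[] args) throws IOException {
    TokenStream stream = new WhitespaceTokenizer(new StringReader("incorrect usage fails"));

    // addAttribute registers TermAttribute on the stream if it is not already
    // present, so this also works when the producer only uses the new API.
    // getAttribute would instead throw IllegalArgumentException in that case.
    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);

    while (stream.incrementToken()) {
      System.out.println(termAtt.term());
    }
    stream.close();
  }
}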


@@ -156,8 +156,8 @@ public class QueryScorer implements Scorer {
*/
public TokenStream init(TokenStream tokenStream) throws IOException {
position = -1;
- termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
- posIncAtt = (PositionIncrementAttribute) tokenStream.getAttribute(PositionIncrementAttribute.class);
+ termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class);
+ posIncAtt = (PositionIncrementAttribute) tokenStream.addAttribute(PositionIncrementAttribute.class);
if(!skipInitExtractor) {
if(fieldWeightedSpanTerms != null) {
fieldWeightedSpanTerms.clear();


@@ -95,7 +95,7 @@ public class QueryTermScorer implements Scorer {
* @see org.apache.lucene.search.highlight.Scorer#init(org.apache.lucene.analysis.TokenStream)
*/
public TokenStream init(TokenStream tokenStream) {
- termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
+ termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class);
return null;
}


@@ -47,7 +47,7 @@ public class SimpleFragmenter implements Fragmenter {
* @see org.apache.lucene.search.highlight.Fragmenter#start(java.lang.String, org.apache.lucene.analysis.TokenStream)
*/
public void start(String originalText, TokenStream stream) {
- offsetAtt = (OffsetAttribute) stream.getAttribute(OffsetAttribute.class);
+ offsetAtt = (OffsetAttribute) stream.addAttribute(OffsetAttribute.class);
currentNumFrags = 1;
}


@@ -101,8 +101,8 @@ public class SimpleSpanFragmenter implements Fragmenter {
position = -1;
currentNumFrags = 1;
textSize = originalText.length();
- termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
- posIncAtt = (PositionIncrementAttribute) tokenStream.getAttribute(PositionIncrementAttribute.class);
- offsetAtt = (OffsetAttribute) tokenStream.getAttribute(OffsetAttribute.class);
+ termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class);
+ posIncAtt = (PositionIncrementAttribute) tokenStream.addAttribute(PositionIncrementAttribute.class);
+ offsetAtt = (OffsetAttribute) tokenStream.addAttribute(OffsetAttribute.class);
}
}


@@ -41,8 +41,8 @@ public class TokenGroup {
private TermAttribute termAtt;
public TokenGroup(TokenStream tokenStream) {
- offsetAtt = (OffsetAttribute) tokenStream.getAttribute(OffsetAttribute.class);
- termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
+ offsetAtt = (OffsetAttribute) tokenStream.addAttribute(OffsetAttribute.class);
+ termAtt = (TermAttribute) tokenStream.addAttribute(TermAttribute.class);
}
void addToken(float score) {


@@ -33,8 +33,6 @@ import java.util.StringTokenizer;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
- import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -78,6 +76,7 @@ import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
@@ -85,7 +84,7 @@ import org.w3c.dom.NodeList;
* JUnit Test for Highlighter class.
*
*/
- public class HighlighterTest extends TestCase implements Formatter {
+ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter {
private IndexReader reader;
static final String FIELD_NAME = "contents";
private Query query;
@@ -1600,10 +1599,8 @@ public class HighlighterTest extends TestCase implements Formatter {
}
}
- /*
- * @see TestCase#setUp()
- */
protected void setUp() throws Exception {
+ super.setUp();
ramDir = new RAMDirectory();
IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(), true);
for (int i = 0; i < texts.length; i++) {
@@ -1624,9 +1621,6 @@ public class HighlighterTest extends TestCase implements Formatter {
}
- /*
- * @see TestCase#tearDown()
- */
protected void tearDown() throws Exception {
super.tearDown();
}
@@ -1692,9 +1686,9 @@ class SynonymTokenizer extends TokenStream {
public SynonymTokenizer(TokenStream realStream, Map synonyms) {
this.realStream = realStream;
this.synonyms = synonyms;
- realTermAtt = (TermAttribute) realStream.getAttribute(TermAttribute.class);
- realPosIncrAtt = (PositionIncrementAttribute) realStream.getAttribute(PositionIncrementAttribute.class);
- realOffsetAtt = (OffsetAttribute) realStream.getAttribute(OffsetAttribute.class);
+ realTermAtt = (TermAttribute) realStream.addAttribute(TermAttribute.class);
+ realPosIncrAtt = (PositionIncrementAttribute) realStream.addAttribute(PositionIncrementAttribute.class);
+ realOffsetAtt = (OffsetAttribute) realStream.addAttribute(OffsetAttribute.class);
termAtt = (TermAttribute) addAttribute(TermAttribute.class);
posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);


@@ -33,8 +33,7 @@ import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
- import junit.framework.TestCase;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.StopAnalyzer;
@@ -198,7 +197,7 @@ the^3
</pre>
*/
- public class MemoryIndexTest extends TestCase {
+ public class MemoryIndexTest extends BaseTokenStreamTestCase {
private Analyzer analyzer;
private boolean fastMode = false;
@@ -214,7 +213,8 @@ public class MemoryIndexTest extends TestCase {
/* all files will be open relative to this */
public String fileDir;
- public void setUp() {
+ protected void setUp() throws Exception {
+ super.setUp();
fileDir = System.getProperty("lucene.common.dir", null);
}


@@ -31,8 +31,7 @@ import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
- import junit.framework.TestCase;
+ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.LetterTokenizer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopAnalyzer;
@@ -58,8 +57,9 @@ Lucene features: CharTokenizer.MAX_WORD_LEN = 255.
Thus the PatternAnalyzer produces correct output, whereas the WhitespaceAnalyzer
silently truncates text, and so the comparison results in assertEquals() don't match up.
+ TODO: Convert to new TokenStream API!
*/
- public class PatternAnalyzerTest extends TestCase {
+ public class PatternAnalyzerTest extends LuceneTestCase {
/** Runs the tests and/or benchmark */
public static void main(String[] args) throws Throwable {


@@ -31,10 +31,9 @@ import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
- import junit.framework.TestCase;
- public class TestSynonymTokenFilter extends TestCase {
+ public class TestSynonymTokenFilter extends BaseTokenStreamTestCase {
File dataDir = new File(System.getProperty("dataDir", "./bin"));
File testFile = new File(dataDir, "org/apache/lucene/index/memory/testSynonyms.txt");


@@ -249,7 +249,7 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
private boolean doNextCall;
void start(Fieldable f) {
- termAtt = (TermAttribute) fieldState.attributeSource.getAttribute(TermAttribute.class);
+ termAtt = (TermAttribute) fieldState.attributeSource.addAttribute(TermAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);


@@ -61,7 +61,7 @@ public class QueryTermVector implements TermFreqVector {
boolean hasMoreTokens = false;
stream.reset();
- TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+ TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
hasMoreTokens = stream.incrementToken();
while (hasMoreTokens) {


@@ -249,7 +249,11 @@ public class AttributeSource {
* <p>Signature for Java 1.5: <code>public &lt;T extends Attribute&gt; T getAttribute(Class&lt;T&gt;)</code>
*
* @throws IllegalArgumentException if this AttributeSource does not contain the
- * Attribute
+ * Attribute. It is recommended to always use {@link #addAttribute} even in consumers
+ * of TokenStreams, because you cannot know if a specific TokenStream really uses
+ * a specific Attribute. {@link #addAttribute} will automatically make the attribute
+ * available. If you want to only use the attribute, if it is available (to optimize
+ * consuming), use {@link #hasAttribute}.
*/
public Attribute getAttribute(Class attClass) {
final Attribute att = (Attribute) this.attributes.get(attClass);
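The javadoc above describes a two-tier pattern for consumers: attributes you always need go through addAttribute, while attributes you only want to read when the producer provides them are guarded with hasAttribute. The sketch below illustrates that recommendation; it is a hypothetical consumer, not code from this commit.

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

// Hypothetical helper showing the hasAttribute/getAttribute optimization.
public class OptionalAttributeSketch {
  public static int countPositions(TokenStream stream) throws IOException {
    // Optional attribute: only consult it if the producer declared it;
    // otherwise fall back to the default increment of 1 per token.
    PositionIncrementAttribute posIncAtt = stream.hasAttribute(PositionIncrementAttribute.class)
        ? (PositionIncrementAttribute) stream.getAttribute(PositionIncrementAttribute.class)
        : null;

    int positions = 0;
    while (stream.incrementToken()) {
      positions += (posIncAtt != null) ? posIncAtt.getPositionIncrement() : 1;
    }
    return positions;
  }
}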


@@ -0,0 +1,84 @@
package org.apache.lucene.analysis;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.Set;

import org.apache.lucene.util.LuceneTestCase;

/**
 * Base class for all Lucene unit tests that use TokenStreams.
 * <p>
 * This class runs all tests twice, one time with {@link TokenStream#setOnlyUseNewAPI} <code>false</code>
 * and after that one time with <code>true</code>.
 */
public abstract class BaseTokenStreamTestCase extends LuceneTestCase {

  private boolean onlyUseNewAPI = false;
  private final Set testWithNewAPI;

  public BaseTokenStreamTestCase() {
    super();
    this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI
  }

  public BaseTokenStreamTestCase(String name) {
    super(name);
    this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI
  }

  public BaseTokenStreamTestCase(Set testWithNewAPI) {
    super();
    this.testWithNewAPI = testWithNewAPI;
  }

  public BaseTokenStreamTestCase(String name, Set testWithNewAPI) {
    super(name);
    this.testWithNewAPI = testWithNewAPI;
  }

  // @Override
  protected void setUp() throws Exception {
    super.setUp();
    TokenStream.setOnlyUseNewAPI(onlyUseNewAPI);
  }

  // @Override
  public void runBare() throws Throwable {
    // Do the test with onlyUseNewAPI=false (default)
    try {
      onlyUseNewAPI = false;
      super.runBare();
    } catch (Throwable e) {
      System.out.println("Test failure of "+getName()+" occurred with onlyUseNewAPI=false");
      throw e;
    }

    if (testWithNewAPI == null || testWithNewAPI.contains(getName())) {
      // Do the test again with onlyUseNewAPI=true
      try {
        onlyUseNewAPI = true;
        super.runBare();
      } catch (Throwable e) {
        System.out.println("Test failure of "+getName()+" occurred with onlyUseNewAPI=true");
        throw e;
      }
    }
  }
}
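A hypothetical usage sketch (not part of this commit): any test that subclasses the new base class has each test method executed twice by runBare(), once with onlyUseNewAPI=false and once with true, so incorrect getAttribute usage in analyzers surfaces immediately.

import java.io.StringReader;

import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

// Made-up test class for illustration only.
public class TestWhitespaceSplit extends BaseTokenStreamTestCase {
  public void testSplit() throws Exception {
    // This body runs under both API modes thanks to BaseTokenStreamTestCase.runBare().
    TokenStream stream = new WhitespaceTokenizer(new StringReader("foo bar"));
    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
    assertTrue(stream.incrementToken());
    assertEquals("foo", termAtt.term());
    assertTrue(stream.incrementToken());
    assertEquals("bar", termAtt.term());
    assertFalse(stream.incrementToken());
  }
}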


@@ -25,6 +25,7 @@ import java.util.List;
import org.apache.lucene.util.LuceneTestCase;
+ /* TODO: Convert to new TokenStream API. Token instances must be removed for that to work */
public abstract class BaseTokenTestCase extends LuceneTestCase {
public static String tsToString(TokenStream in) throws IOException {
StringBuffer out = new StringBuffer();


@@ -18,14 +18,13 @@ package org.apache.lucene.analysis;
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
- import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
import java.util.Iterator;
- public class TestASCIIFoldingFilter extends LuceneTestCase {
+ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
// testLain1Accents() is a copy of TestLatin1AccentFilter.testU().
public void testLatin1Accents() throws Exception {


@@ -26,9 +26,8 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
- import org.apache.lucene.util.LuceneTestCase;
- public class TestAnalyzers extends LuceneTestCase {
+ public class TestAnalyzers extends BaseTokenStreamTestCase {
public TestAnalyzers(String name) {
super(name);


@@ -20,8 +20,6 @@ package org.apache.lucene.analysis;
import java.io.IOException;
- import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
@@ -34,7 +32,7 @@ import org.apache.lucene.index.TermPositions;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
- public class TestCachingTokenFilter extends LuceneTestCase {
+ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
public void testCaching() throws IOException {


@@ -18,11 +18,10 @@ package org.apache.lucene.analysis;
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
- import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
- public class TestISOLatin1AccentFilter extends LuceneTestCase {
+ public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
public void testU() throws Exception {
TokenStream stream = new WhitespaceTokenizer(new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);


@@ -31,9 +31,8 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.RAMDirectory;
- import org.apache.lucene.util.LuceneTestCase;
- public class TestKeywordAnalyzer extends LuceneTestCase {
+ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
private RAMDirectory directory;
private IndexSearcher searcher;


@@ -18,11 +18,10 @@ package org.apache.lucene.analysis;
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
- import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
- public class TestLengthFilter extends LuceneTestCase {
+ public class TestLengthFilter extends BaseTokenStreamTestCase {
public void testFilter() throws Exception {
TokenStream stream = new WhitespaceTokenizer(


@@ -17,12 +17,11 @@ package org.apache.lucene.analysis;
* limitations under the License.
*/
- import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
- public class TestNumericTokenStream extends LuceneTestCase {
+ public class TestNumericTokenStream extends BaseTokenStreamTestCase {
static final long lvalue = 4573245871874382L;
static final int ivalue = 123456;


@@ -3,7 +3,6 @@ package org.apache.lucene.analysis;
import java.io.StringReader;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
- import org.apache.lucene.util.LuceneTestCase;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -22,7 +21,7 @@ import org.apache.lucene.util.LuceneTestCase;
* limitations under the License.
*/
- public class TestPerFieldAnalzyerWrapper extends LuceneTestCase {
+ public class TestPerFieldAnalzyerWrapper extends BaseTokenStreamTestCase {
public void testPerField() throws Exception {
String text = "Qwerty";
PerFieldAnalyzerWrapper analyzer =


@@ -5,7 +5,6 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
- import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
@@ -25,7 +24,7 @@ import java.io.StringReader;
* limitations under the License.
*/
- public class TestStandardAnalyzer extends LuceneTestCase {
+ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
private Analyzer a = new StandardAnalyzer();


@@ -19,7 +19,6 @@ package org.apache.lucene.analysis;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
- import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
import java.io.IOException;
@@ -27,7 +26,7 @@ import java.util.Iterator;
import java.util.Set;
import java.util.HashSet;
- public class TestStopAnalyzer extends LuceneTestCase {
+ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
private StopAnalyzer stop = new StopAnalyzer(false);
private Set inValidTokens = new HashSet();


@@ -19,7 +19,6 @@ package org.apache.lucene.analysis;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.English;
- import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.io.StringReader;
@@ -27,7 +26,7 @@ import java.util.ArrayList;
import java.util.Set;
- public class TestStopFilter extends LuceneTestCase {
+ public class TestStopFilter extends BaseTokenStreamTestCase {
private final static boolean VERBOSE = false;


@@ -22,7 +22,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.English;
- import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.io.StringReader;
@@ -32,7 +31,7 @@ import java.util.List;
/**
* tests for the TestTeeSinkTokenFilter
*/
- public class TestTeeSinkTokenFilter extends LuceneTestCase {
+ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
protected StringBuffer buffer1;
protected StringBuffer buffer2;
protected String[] tokens1;


@@ -38,10 +38,10 @@ import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.AttributeSource;
- import org.apache.lucene.util.LuceneTestCase;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util._TestUtil;
- public class TestDocumentWriter extends LuceneTestCase {
+ public class TestDocumentWriter extends BaseTokenStreamTestCase {
private RAMDirectory dir;
public TestDocumentWriter(String s) {


@@ -30,7 +30,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.search.Query;
- import org.apache.lucene.util.LuceneTestCase;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
/**
* Test QueryParser's ability to deal with Analyzers that return more
@@ -38,7 +38,7 @@ import org.apache.lucene.util.LuceneTestCase;
* increment &gt; 1.
*
*/
- public class TestMultiAnalyzer extends LuceneTestCase {
+ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
private static int multiToken = 0;


@@ -35,12 +35,12 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
- import org.apache.lucene.util.LuceneTestCase;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
/**
* Tests QueryParser.
*/
- public class TestMultiFieldQueryParser extends LuceneTestCase {
+ public class TestMultiFieldQueryParser extends BaseTokenStreamTestCase {
/** test stop words arsing for both the non static form, and for the
* corresponding static form (qtxt, fields[]). */


@@ -58,12 +58,12 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.RAMDirectory;
- import org.apache.lucene.util.LuceneTestCase;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
/**
* Tests QueryParser.
*/
- public class TestQueryParser extends LuceneTestCase {
+ public class TestQueryParser extends BaseTokenStreamTestCase {
public static Analyzer qpAnalyzer = new QPTestAnalyzer();


@@ -40,7 +40,7 @@ import org.apache.lucene.index.TermPositions;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory;
- import org.apache.lucene.util.LuceneTestCase;
+ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.index.Payload;
@@ -56,7 +56,7 @@ import org.apache.lucene.search.spans.Spans;
*
* @version $Revision$
*/
- public class TestPositionIncrement extends LuceneTestCase {
+ public class TestPositionIncrement extends BaseTokenStreamTestCase {
public void testSetPosition() throws Exception {
Analyzer analyzer = new Analyzer() {


@@ -24,6 +24,7 @@ import java.util.Random;
import junit.framework.TestCase;
+ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldCache.CacheEntry;
@@ -58,7 +59,9 @@ public abstract class LuceneTestCase extends TestCase {
}
protected void setUp() throws Exception {
super.setUp();
ConcurrentMergeScheduler.setTestMode();
+ TokenStream.setOnlyUseNewAPI(false);
}
/**
@@ -96,6 +99,7 @@ public abstract class LuceneTestCase extends TestCase {
} finally {
purgeFieldCache(FieldCache.DEFAULT);
}
+ super.tearDown();
}
/**