diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
index 4896fa8b9c6..20a9e7af2ec 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
@@ -138,7 +138,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
};
}
- try {
+ try (Analyzer a = analyzer) {
String formatClass = format;
if (format == null || format.equals("solr")) {
formatClass = SolrSynonymParser.class.getName();
@@ -146,7 +146,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
formatClass = WordnetSynonymParser.class.getName();
}
// TODO: expose dedup as a parameter?
- map = loadSynonyms(loader, formatClass, true, analyzer);
+ map = loadSynonyms(loader, formatClass, true, a);
} catch (ParseException e) {
throw new IOException("Error parsing synonyms file:", e);
}
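
Reviewer note: the hunk above switches the synonym-loading path to try-with-resources, binding the previously constructed analyzer to the resource variable `a` so it is closed even when parsing throws. A minimal, self-contained sketch of the same idiom — the `Resource` type below is a hypothetical stand-in, not a Lucene class:

    import java.io.Closeable;

    public class TryWithExistingDemo {
      // hypothetical stand-in for the analyzer built earlier in the factory
      static class Resource implements Closeable {
        void use() { System.out.println("using"); }
        @Override public void close() { System.out.println("closed"); }
      }

      public static void main(String[] args) {
        Resource analyzer = new Resource();   // constructed before the try, as in the patch
        try (Resource a = analyzer) {         // adopt the existing instance as the resource
          a.use();                            // may throw; close() still runs
        }
      }
    }
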
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
index 49275c9328c..95a636baad2 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
@@ -31,7 +31,7 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new ArabicAnalyzer();
+ new ArabicAnalyzer().close();
}
/**
@@ -53,6 +53,7 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
assertAnalyzesTo(a, "ما ملكت أيمانكم", new String[] { "ملكت", "ايمانكم"});
assertAnalyzesTo(a, "الذين ملكت أيمانكم", new String[] { "ملكت", "ايمانكم" }); // stopwords
+ a.close();
}
/**
@@ -62,14 +63,17 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
ArabicAnalyzer a = new ArabicAnalyzer();
assertAnalyzesTo(a, "كبير", new String[] { "كبير" });
assertAnalyzesTo(a, "كبيرة", new String[] { "كبير" }); // feminine marker
+ a.close();
}
/**
* Non-arabic text gets treated in a similar way as SimpleAnalyzer.
*/
public void testEnglishInput() throws Exception {
- assertAnalyzesTo(new ArabicAnalyzer(), "English text.", new String[] {
+ ArabicAnalyzer a = new ArabicAnalyzer();
+ assertAnalyzesTo(a, "English text.", new String[] {
"english", "text" });
+ a.close();
}
/**
@@ -80,6 +84,7 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
ArabicAnalyzer a = new ArabicAnalyzer(set);
assertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
+ a.close();
}
public void testWithStemExclusionSet() throws IOException {
@@ -87,15 +92,18 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
ArabicAnalyzer a = new ArabicAnalyzer(CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهدهات" });
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهدهات" });
-
+ a.close();
a = new ArabicAnalyzer(CharArraySet.EMPTY_SET, CharArraySet.EMPTY_SET);
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهد" });
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهد" });
+ a.close();
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new ArabicAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ ArabicAnalyzer a = new ArabicAnalyzer();
+ checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
}
}
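
Reviewer note: the same mechanical fix repeats through the analyzer tests below. A throwaway analyzer gets `new X().close()` on one line, and an analyzer used across several assertions gets a local variable plus a trailing `close()`. A rough sketch of the two shapes, using a hypothetical `FakeAnalyzer` stand-in rather than the real Lucene types:

    import java.io.Closeable;

    public class ClosePatternDemo {
      // hypothetical stand-in for ArabicAnalyzer and friends
      static class FakeAnalyzer implements Closeable {
        String[] analyze(String text) { return text.toLowerCase().split("\\s+"); }
        @Override public void close() { /* release reuse strategy and cached streams */ }
      }

      public static void main(String[] args) {
        // shape 1: constructor smoke test, construct and close in one statement
        new FakeAnalyzer().close();

        // shape 2: several assertions against one instance, closed at the end
        FakeAnalyzer a = new FakeAnalyzer();
        System.out.println(a.analyze("English text.")[0]); // prints "english"
        a.close();
      }
    }
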
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
index 49a749471de..b3ddfb0069b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
@@ -104,6 +104,7 @@ public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
index 8768e290350..09169bfc0e1 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
@@ -141,5 +141,6 @@ public class TestArabicStemFilter extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
index dd50be254a9..a5b82a87c04 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
@@ -32,24 +32,27 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
* This test fails with NPE when the stopwords file is missing in classpath
*/
public void testResourcesAvailable() {
- new BulgarianAnalyzer();
+ new BulgarianAnalyzer().close();
}
public void testStopwords() throws IOException {
Analyzer a = new BulgarianAnalyzer();
assertAnalyzesTo(a, "Как се казваш?", new String[] {"казваш"});
+ a.close();
}
public void testCustomStopwords() throws IOException {
Analyzer a = new BulgarianAnalyzer(CharArraySet.EMPTY_SET);
assertAnalyzesTo(a, "Как се казваш?",
new String[] {"как", "се", "казваш"});
+ a.close();
}
public void testReusableTokenStream() throws IOException {
Analyzer a = new BulgarianAnalyzer();
assertAnalyzesTo(a, "документи", new String[] {"документ"});
assertAnalyzesTo(a, "документ", new String[] {"документ"});
+ a.close();
}
/**
@@ -64,6 +67,7 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
assertAnalyzesTo(a, "компютър", new String[] {"компютр"});
assertAnalyzesTo(a, "градове", new String[] {"град"});
+ a.close();
}
public void testWithStemExclusionSet() throws IOException {
@@ -71,10 +75,13 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
set.add("строеве");
Analyzer a = new BulgarianAnalyzer(CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "строевете строеве", new String[] { "строй", "строеве" });
+ a.close();
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new BulgarianAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ BulgarianAnalyzer a = new BulgarianAnalyzer();
+ checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
index 5054ff5e074..476c156900b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
@@ -97,6 +97,8 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
assertAnalyzesTo(a, "братя", new String[] {"брат"});
assertAnalyzesTo(a, "братята", new String[] {"брат"});
assertAnalyzesTo(a, "брате", new String[] {"брат"});
+
+ a.close();
}
/**
@@ -109,6 +111,8 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
assertAnalyzesTo(a, "вестта", new String[] {"вест"});
assertAnalyzesTo(a, "вести", new String[] {"вест"});
assertAnalyzesTo(a, "вестите", new String[] {"вест"});
+
+ a.close();
}
/**
@@ -138,6 +142,8 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
assertAnalyzesTo(a, "изключенията", new String[] {"изключени"});
/* note the below form in this example does not conflate with the rest */
assertAnalyzesTo(a, "изключения", new String[] {"изключн"});
+
+ a.close();
}
/**
@@ -154,6 +160,7 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
assertAnalyzesTo(a, "красивото", new String[] {"красив"});
assertAnalyzesTo(a, "красиви", new String[] {"красив"});
assertAnalyzesTo(a, "красивите", new String[] {"красив"});
+ a.close();
}
/**
@@ -212,6 +219,8 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
/* note the below forms conflate with each other, but not the rest */
assertAnalyzesTo(a, "строя", new String[] {"стр"});
assertAnalyzesTo(a, "строят", new String[] {"стр"});
+
+ a.close();
}
public void testWithKeywordAttribute() throws IOException {
@@ -234,5 +243,6 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianAnalyzer.java
index a96cbfb8393..23500d30d23 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianAnalyzer.java
@@ -135,12 +135,14 @@ public class TestBrazilianAnalyzer extends BaseTokenStreamTestCase {
checkReuse(a, "boainain", "boainain");
checkReuse(a, "boas", "boas");
checkReuse(a, "bôas", "boas"); // removes diacritic: different from snowball portugese
+ a.close();
}
public void testStemExclusionTable() throws Exception {
BrazilianAnalyzer a = new BrazilianAnalyzer(
CharArraySet.EMPTY_SET, new CharArraySet(asSet("quintessência"), false));
checkReuse(a, "quintessência", "quintessência"); // excluded words will be completely unchanged.
+ a.close();
}
public void testWithKeywordAttribute() throws IOException {
@@ -154,7 +156,9 @@ public class TestBrazilianAnalyzer extends BaseTokenStreamTestCase {
}
private void check(final String input, final String expected) throws Exception {
- checkOneTerm(new BrazilianAnalyzer(), input, expected);
+ BrazilianAnalyzer a = new BrazilianAnalyzer();
+ checkOneTerm(a, input, expected);
+ a.close();
}
private void checkReuse(Analyzer a, String input, String expected) throws Exception {
@@ -163,7 +167,9 @@ public class TestBrazilianAnalyzer extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new BrazilianAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ BrazilianAnalyzer a = new BrazilianAnalyzer();
+ checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
}
public void testEmptyTerm() throws IOException {
@@ -175,5 +181,6 @@ public class TestBrazilianAnalyzer extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java
index bc14adc7897..661f49aba2c 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java
@@ -27,7 +27,7 @@ public class TestCatalanAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new CatalanAnalyzer();
+ new CatalanAnalyzer().close();
}
/** test stopwords and stemming */
@@ -38,6 +38,7 @@ public class TestCatalanAnalyzer extends BaseTokenStreamTestCase {
checkOneTerm(a, "llengua", "llengu");
// stopword
assertAnalyzesTo(a, "un", new String[] { });
+ a.close();
}
/** test use of elisionfilter */
@@ -45,6 +46,7 @@ public class TestCatalanAnalyzer extends BaseTokenStreamTestCase {
Analyzer a = new CatalanAnalyzer();
assertAnalyzesTo(a, "Diccionari de l'Institut d'Estudis Catalans",
new String[] { "diccion", "inst", "estud", "catalan" });
+ a.close();
}
/** test use of exclusion set */
@@ -53,10 +55,13 @@ public class TestCatalanAnalyzer extends BaseTokenStreamTestCase {
Analyzer a = new CatalanAnalyzer(CatalanAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTerm(a, "llengües", "llengües");
checkOneTerm(a, "llengua", "llengu");
+ a.close();
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new CatalanAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ CatalanAnalyzer a = new CatalanAnalyzer();
+ checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
index ce1429a8f30..fb07ceea3d5 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
@@ -25,7 +25,6 @@ import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashSet;
-import java.util.List;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
@@ -403,16 +402,22 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testRandom() throws Exception {
int numRounds = RANDOM_MULTIPLIER * 1000;
- checkRandomData(random(), newTestAnalyzer(), numRounds);
+ Analyzer a = newTestAnalyzer();
+ checkRandomData(random(), a, numRounds);
+ a.close();
}
public void testRandomHugeStrings() throws Exception {
int numRounds = RANDOM_MULTIPLIER * 100;
- checkRandomData(random(), newTestAnalyzer(), numRounds, 8192);
+ Analyzer a = newTestAnalyzer();
+ checkRandomData(random(), a, numRounds, 8192);
+ a.close();
}
public void testCloseBR() throws Exception {
- checkAnalysisConsistency(random(), newTestAnalyzer(), random().nextBoolean(), " Secretary) [[M");
+ Analyzer a = newTestAnalyzer();
+ checkAnalysisConsistency(random(), a, random().nextBoolean(), " Secretary) [[M");
+ a.close();
}
public void testServerSideIncludes() throws Exception {
@@ -549,7 +554,9 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testRandomBrokenHTML() throws Exception {
int maxNumElements = 10000;
String text = TestUtil.randomHtmlishString(random(), maxNumElements);
- checkAnalysisConsistency(random(), newTestAnalyzer(), random().nextBoolean(), text);
+ Analyzer a = newTestAnalyzer();
+ checkAnalysisConsistency(random(), a, random().nextBoolean(), text);
+ a.close();
}
public void testRandomText() throws Exception {
@@ -617,6 +624,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
assertAnalyzesTo(analyzer, " ", new String[] { "\uFFFD" } );
assertAnalyzesTo(analyzer, " ", new String[] { "\uFFFD" } );
assertAnalyzesTo(analyzer, "
", new String[] { "" } );
+ analyzer.close();
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
index e6f5e95c847..40a845c060a 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
@@ -216,6 +216,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
int numRounds = RANDOM_MULTIPLIER * 10000;
checkRandomData(random(), analyzer, numRounds);
+ analyzer.close();
}
//@Ignore("wrong finalOffset: https://issues.apache.org/jira/browse/LUCENE-3971")
@@ -242,6 +243,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
String text = "gzw f quaxot";
checkAnalysisConsistency(random(), analyzer, false, text);
+ analyzer.close();
}
//@Ignore("wrong finalOffset: https://issues.apache.org/jira/browse/LUCENE-3971")
@@ -263,6 +265,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
};
int numRounds = 100;
checkRandomData(random(), analyzer, numRounds);
+ analyzer.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java
index fc25c5496c0..71a1a698fe0 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.cjk;
import java.io.IOException;
import java.io.Reader;
-import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -39,7 +38,19 @@ import org.apache.lucene.analysis.util.CharArraySet;
* Most tests adopted from TestCJKTokenizer
*/
public class TestCJKAnalyzer extends BaseTokenStreamTestCase {
- private Analyzer analyzer = new CJKAnalyzer();
+ private Analyzer analyzer;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ analyzer = new CJKAnalyzer();
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ analyzer.close();
+ super.tearDown();
+ }
public void testJa1() throws IOException {
assertAnalyzesTo(analyzer, "一二三四五六七八九十",
@@ -228,6 +239,8 @@ public class TestCJKAnalyzer extends BaseTokenStreamTestCase {
// before bigramming, the 4 tokens look like:
// { 0, 0, 1, 1 },
// { 0, 1, 1, 2 }
+
+ analyzer.close();
}
private static class FakeStandardTokenizer extends TokenFilter {
@@ -267,17 +280,21 @@ public class TestCJKAnalyzer extends BaseTokenStreamTestCase {
new int[] { 1 },
new String[] { "" },
new int[] { 1 });
+ analyzer.close();
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new CJKAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ Analyzer a = new CJKAnalyzer();
+ checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
}
/** blast some random strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
- Random random = random();
- checkRandomData(random, new CJKAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
+ Analyzer a = new CJKAnalyzer();
+ checkRandomData(random(), a, 100*RANDOM_MULTIPLIER, 8192);
+ a.close();
}
public void testEmptyTerm() throws IOException {
@@ -289,5 +306,6 @@ public class TestCJKAnalyzer extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
}
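
Reviewer note: several files in this patch (TestCJKAnalyzer above, TestCJKWidthFilter and TestSoraniNormalizationFilter below) move the analyzer out of a field initializer and into setUp(), with a matching close() in tearDown(), so every test gets a fresh instance that is reliably released. A JUnit 4 sketch of that lifecycle, assuming only a generic Closeable in place of the analyzer:

    import java.io.Closeable;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class LifecycleDemoTest {
      private Closeable analyzer;

      @Before
      public void setUp() throws Exception {
        // fresh per-test instance; a field initializer would never be closed
        analyzer = () -> System.out.println("closed");
      }

      @Test
      public void testSomething() {
        // use the analyzer; no cleanup needed in the test body
      }

      @After
      public void tearDown() throws Exception {
        analyzer.close(); // runs even when the test method fails
      }
    }
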
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilter.java
index 08684009d8e..8f81756c7ee 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilter.java
@@ -17,31 +17,42 @@ package org.apache.lucene.analysis.cjk;
* limitations under the License.
*/
-import java.io.Reader;
import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.util.IOUtils;
public class TestCJKBigramFilter extends BaseTokenStreamTestCase {
- Analyzer analyzer = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer t = new StandardTokenizer();
- return new TokenStreamComponents(t, new CJKBigramFilter(t));
- }
- };
+ Analyzer analyzer, unibiAnalyzer;
- Analyzer unibiAnalyzer = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer t = new StandardTokenizer();
- return new TokenStreamComponents(t,
- new CJKBigramFilter(t, 0xff, true));
- }
- };
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ return new TokenStreamComponents(t, new CJKBigramFilter(t));
+ }
+ };
+ unibiAnalyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ return new TokenStreamComponents(t,
+ new CJKBigramFilter(t, 0xff, true));
+ }
+ };
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ IOUtils.close(analyzer, unibiAnalyzer);
+ super.tearDown();
+ }
public void testHuge() throws Exception {
assertAnalyzesTo(analyzer, "多くの学生が試験に落ちた" + "多くの学生が試験に落ちた" + "多くの学生が試験に落ちた"
@@ -79,6 +90,7 @@ public class TestCJKBigramFilter extends BaseTokenStreamTestCase {
"", "", "", "", "" },
new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 });
+ a.close();
}
public void testAllScripts() throws Exception {
@@ -92,6 +104,7 @@ public class TestCJKBigramFilter extends BaseTokenStreamTestCase {
};
assertAnalyzesTo(a, "多くの学生が試験に落ちた。",
new String[] { "多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" });
+ a.close();
}
public void testUnigramsAndBigramsAllScripts() throws Exception {
@@ -132,6 +145,7 @@ public class TestCJKBigramFilter extends BaseTokenStreamTestCase {
"", "", "", "", "" },
new int[] { 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1 },
new int[] { 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1 });
+ a.close();
}
public void testUnigramsAndBigramsHuge() throws Exception {
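
Reviewer note: TestCJKBigramFilter's tearDown above uses `IOUtils.close(analyzer, unibiAnalyzer)` instead of two sequential close() calls, so the second analyzer is still closed if the first close() throws. A simplified sketch of those semantics (Lucene's real org.apache.lucene.util.IOUtils also chains suppressed exceptions):

    import java.io.Closeable;
    import java.io.IOException;

    public final class CloseAllDemo {
      /** Close every argument, rethrowing the first failure at the end. */
      public static void closeAll(Closeable... objects) throws IOException {
        IOException first = null;
        for (Closeable o : objects) {
          try {
            if (o != null) o.close();      // null-tolerant, like Lucene's IOUtils
          } catch (IOException e) {
            if (first == null) first = e;  // remember the first failure, keep closing
          }
        }
        if (first != null) throw first;
      }

      public static void main(String[] args) throws IOException {
        Closeable ok = () -> System.out.println("closed ok");
        Closeable bad = () -> { throw new IOException("boom"); };
        closeAll(bad, ok);                 // "closed ok" still prints, then IOException propagates
      }
    }
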
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java
index 733c4e1ae79..0e6f2907cea 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java
@@ -29,13 +29,25 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
* Tests for {@link CJKWidthFilter}
*/
public class TestCJKWidthFilter extends BaseTokenStreamTestCase {
- private Analyzer analyzer = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
- return new TokenStreamComponents(source, new CJKWidthFilter(source));
- }
- };
+ private Analyzer analyzer;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+ return new TokenStreamComponents(source, new CJKWidthFilter(source));
+ }
+ };
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ analyzer.close();
+ super.tearDown();
+ }
/**
* Full-width ASCII forms normalized to half-width (basic latin)
@@ -74,5 +86,6 @@ public class TestCJKWidthFilter extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniAnalyzer.java
index 9a2c9d9969a..e3942c14368 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniAnalyzer.java
@@ -32,24 +32,27 @@ public class TestSoraniAnalyzer extends BaseTokenStreamTestCase {
* This test fails with NPE when the stopwords file is missing in classpath
*/
public void testResourcesAvailable() {
- new SoraniAnalyzer();
+ new SoraniAnalyzer().close();
}
public void testStopwords() throws IOException {
Analyzer a = new SoraniAnalyzer();
assertAnalyzesTo(a, "ئەم پیاوە", new String[] {"پیاو"});
+ a.close();
}
public void testCustomStopwords() throws IOException {
Analyzer a = new SoraniAnalyzer(CharArraySet.EMPTY_SET);
assertAnalyzesTo(a, "ئەم پیاوە",
new String[] {"ئەم", "پیاو"});
+ a.close();
}
public void testReusableTokenStream() throws IOException {
Analyzer a = new SoraniAnalyzer();
assertAnalyzesTo(a, "پیاوە", new String[] {"پیاو"});
assertAnalyzesTo(a, "پیاو", new String[] {"پیاو"});
+ a.close();
}
public void testWithStemExclusionSet() throws IOException {
@@ -57,10 +60,13 @@ public class TestSoraniAnalyzer extends BaseTokenStreamTestCase {
set.add("پیاوە");
Analyzer a = new SoraniAnalyzer(CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "پیاوە", new String[] { "پیاوە" });
+ a.close();
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new SoraniAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ Analyzer a = new SoraniAnalyzer();
+ checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniNormalizationFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniNormalizationFilter.java
index a91b0d5eba9..4e8f3529694 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniNormalizationFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniNormalizationFilter.java
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.ckb;
*/
import java.io.IOException;
-import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -30,13 +29,25 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
* Tests normalization for Sorani (this is more critical than stemming...)
*/
public class TestSoraniNormalizationFilter extends BaseTokenStreamTestCase {
- Analyzer a = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer tokenizer = new MockTokenizer(MockTokenizer.KEYWORD, false);
- return new TokenStreamComponents(tokenizer, new SoraniNormalizationFilter(tokenizer));
- }
- };
+ Analyzer a;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ a = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer tokenizer = new MockTokenizer(MockTokenizer.KEYWORD, false);
+ return new TokenStreamComponents(tokenizer, new SoraniNormalizationFilter(tokenizer));
+ }
+ };
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ a.close();
+ super.tearDown();
+ }
public void testY() throws Exception {
checkOneTerm(a, "\u064A", "\u06CC");
@@ -96,5 +107,6 @@ public class TestSoraniNormalizationFilter extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniStemFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniStemFilter.java
index bf98fa659b7..9b867e57e22 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniStemFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ckb/TestSoraniStemFilter.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.ckb;
import static org.apache.lucene.analysis.VocabularyAssert.assertVocabulary;
import java.io.IOException;
-import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -31,7 +30,19 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
* Test the Sorani Stemmer.
*/
public class TestSoraniStemFilter extends BaseTokenStreamTestCase {
- SoraniAnalyzer a = new SoraniAnalyzer();
+ Analyzer a;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ a = new SoraniAnalyzer();
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ a.close();
+ super.tearDown();
+ }
public void testIndefiniteSingular() throws Exception {
checkOneTerm(a, "پیاوێک", "پیاو"); // -ek
@@ -90,6 +101,7 @@ public class TestSoraniStemFilter extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
}
/** test against a basic vocabulary file */
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
index e8afb5eaaf2..409adf743cb 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
@@ -156,6 +156,7 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
new String[] { "s_s", "s_s" });
assertAnalyzesTo(a, "of the of",
new String[] { "of_the", "the_of" });
+ a.close();
}
public void testCommonGramsFilter() throws Exception {
@@ -242,6 +243,7 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
assertAnalyzesTo(a, "of the of",
new String[] { "of", "of_the", "the", "the_of", "of" },
new int[] { 1, 0, 1, 0, 1 });
+ a.close();
}
/**
@@ -330,6 +332,7 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
};
checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
Analyzer b = new Analyzer() {
@@ -342,5 +345,6 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
};
checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER);
+ b.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
index d547e634351..d9d2f01f678 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
@@ -336,6 +336,7 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
new String[] { "bankueberfall", "fall" },
new int[] { 0, 0 },
new int[] { 12, 12 });
+ analyzer.close();
}
/** blast some random strings through the analyzer */
@@ -350,6 +351,7 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
}
};
checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
+ a.close();
InputSource is = new InputSource(getClass().getResource("da_UTF8.xml").toExternalForm());
final HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
@@ -363,6 +365,7 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
}
};
checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER);
+ b.close();
}
public void testEmptyTerm() throws Exception {
@@ -376,6 +379,7 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
}
};
checkOneTerm(a, "", "");
+ a.close();
InputSource is = new InputSource(getClass().getResource("da_UTF8.xml").toExternalForm());
final HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
@@ -389,5 +393,6 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
}
};
checkOneTerm(b, "", "");
+ b.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
index 24b9629e527..8381e27d0c2 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
@@ -18,15 +18,18 @@ package org.apache.lucene.analysis.core;
*/
import java.io.IOException;
-import java.io.Reader;
import java.io.StringReader;
-import java.util.Random;
-import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
public class TestAnalyzers extends BaseTokenStreamTestCase {
@@ -48,6 +51,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
new String[] { "b" });
assertAnalyzesTo(a, "\"QUOTED\" word",
new String[] { "quoted", "word" });
+ a.close();
}
public void testNull() throws Exception {
@@ -68,6 +72,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
new String[] { "2B" });
assertAnalyzesTo(a, "\"QUOTED\" word",
new String[] { "\"QUOTED\"", "word" });
+ a.close();
}
public void testStop() throws Exception {
@@ -76,6 +81,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo a bar such FOO THESE BAR",
new String[] { "foo", "bar", "foo", "bar" });
+ a.close();
}
void verifyPayload(TokenStream ts) throws IOException {
@@ -159,6 +165,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
// unpaired trail surrogate
assertAnalyzesTo(a, "AbaC\uDC16AdaBa",
new String [] { "abac\uDC16adaba" });
+ a.close();
}
/**
@@ -179,9 +186,9 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
// unpaired trail surrogate
assertAnalyzesTo(a, "AbaC\uDC16AdaBa",
new String [] { "ABAC\uDC16ADABA" });
+ a.close();
}
-
/**
* Test that LowercaseFilter handles the lowercasing correctly if the term
* buffer has a trailing surrogate character leftover and the current term in
@@ -223,17 +230,20 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new WhitespaceAnalyzer(), 1000*RANDOM_MULTIPLIER);
- checkRandomData(random(), new SimpleAnalyzer(), 1000*RANDOM_MULTIPLIER);
- checkRandomData(random(), new StopAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ Analyzer analyzers[] = new Analyzer[] { new WhitespaceAnalyzer(), new SimpleAnalyzer(), new StopAnalyzer() };
+ for (Analyzer analyzer : analyzers) {
+ checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+ }
+ IOUtils.close(analyzers);
}
/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
- Random random = random();
- checkRandomData(random, new WhitespaceAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
- checkRandomData(random, new SimpleAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
- checkRandomData(random, new StopAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
+ Analyzer analyzers[] = new Analyzer[] { new WhitespaceAnalyzer(), new SimpleAnalyzer(), new StopAnalyzer() };
+ for (Analyzer analyzer : analyzers) {
+ checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER, 8192);
+ }
+ IOUtils.close(analyzers);
}
}
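
Reviewer note: the TestAnalyzers rewrite above replaces three copy-pasted checkRandomData calls with an array walk plus a single IOUtils.close at the end, keeping the exercise loop and the cleanup list in sync. Roughly, under the same stand-in types as the earlier sketches:

    import java.io.Closeable;
    import java.io.IOException;

    public class AnalyzerBatchDemo {
      public static void main(String[] args) throws IOException {
        // stand-ins for WhitespaceAnalyzer, SimpleAnalyzer, StopAnalyzer
        Closeable[] analyzers = {
            () -> System.out.println("closed 1"),
            () -> System.out.println("closed 2"),
            () -> System.out.println("closed 3"),
        };
        for (Closeable a : analyzers) {
          // the real test calls checkRandomData(random(), a, ...) here
          System.out.println("exercising " + a);
        }
        // one cleanup point for the whole batch (IOUtils.close(analyzers) in the patch)
        for (Closeable a : analyzers) {
          a.close();
        }
      }
    }
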
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestBugInSomething.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestBugInSomething.java
index 5e354d2ac5f..32d2c78834d 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestBugInSomething.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestBugInSomething.java
@@ -75,6 +75,7 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
}
};
checkAnalysisConsistency(random(), a, false, "wmgddzunizdomqyj");
+ a.close();
}
CharFilter wrappedStream = new CharFilter(new StringReader("bogus")) {
@@ -261,6 +262,7 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
}
};
checkRandomData(random(), analyzer, 2000);
+ analyzer.close();
}
public void testCuriousWikipediaString() throws Exception {
@@ -285,5 +287,6 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
}
};
checkAnalysisConsistency(random(), a, false, "B\u28c3\ue0f8[ \ud800\udfc2
jb");
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
index 97cb8b0758f..b76465e84f4 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
@@ -30,6 +30,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
@@ -78,6 +79,7 @@ public class TestDuelingAnalyzers extends BaseTokenStreamTestCase {
assertEquals(s, left.tokenStream("foo", newStringReader(s)),
right.tokenStream("foo", newStringReader(s)));
}
+ IOUtils.close(left, right);
}
// not so useful since it's all one token?!
@@ -99,6 +101,7 @@ public class TestDuelingAnalyzers extends BaseTokenStreamTestCase {
assertEquals(s, left.tokenStream("foo", newStringReader(s)),
right.tokenStream("foo", newStringReader(s)));
}
+ IOUtils.close(left, right);
}
public void testLetterHtmlish() throws Exception {
@@ -116,6 +119,7 @@ public class TestDuelingAnalyzers extends BaseTokenStreamTestCase {
assertEquals(s, left.tokenStream("foo", newStringReader(s)),
right.tokenStream("foo", newStringReader(s)));
}
+ IOUtils.close(left, right);
}
public void testLetterHtmlishHuge() throws Exception {
@@ -136,6 +140,7 @@ public class TestDuelingAnalyzers extends BaseTokenStreamTestCase {
assertEquals(s, left.tokenStream("foo", newStringReader(s)),
right.tokenStream("foo", newStringReader(s)));
}
+ IOUtils.close(left, right);
}
public void testLetterUnicode() throws Exception {
@@ -153,6 +158,7 @@ public class TestDuelingAnalyzers extends BaseTokenStreamTestCase {
assertEquals(s, left.tokenStream("foo", newStringReader(s)),
right.tokenStream("foo", newStringReader(s)));
}
+ IOUtils.close(left, right);
}
public void testLetterUnicodeHuge() throws Exception {
@@ -173,6 +179,7 @@ public class TestDuelingAnalyzers extends BaseTokenStreamTestCase {
assertEquals(s, left.tokenStream("foo", newStringReader(s)),
right.tokenStream("foo", newStringReader(s)));
}
+ IOUtils.close(left, right);
}
// we only check a few core attributes here.
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
index ccdfc49f1fa..8150fda6e36 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
@@ -46,6 +46,8 @@ import org.apache.lucene.util.Version;
// TODO: move this, TestRandomChains, and TestAllAnalyzersHaveFactories
// to an integration test module that sucks in all analysis modules.
// currently the only way to do this is via eclipse etc (LUCENE-3974)
+
+// TODO: fix this to use CustomAnalyzer instead of its own FactoryAnalyzer
public class TestFactories extends BaseTokenStreamTestCase {
public void test() throws IOException {
for (String tokenizer : TokenizerFactory.availableTokenizers()) {
@@ -77,7 +79,9 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
- checkRandomData(random(), new FactoryAnalyzer(factory, null, null), 20, 20, false, false);
+ Analyzer a = new FactoryAnalyzer(factory, null, null);
+ checkRandomData(random(), a, 20, 20, false, false);
+ a.close();
}
}
@@ -97,7 +101,9 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
- checkRandomData(random(), new FactoryAnalyzer(assertingTokenizer, factory, null), 20, 20, false, false);
+ Analyzer a = new FactoryAnalyzer(assertingTokenizer, factory, null);
+ checkRandomData(random(), a, 20, 20, false, false);
+ a.close();
}
}
@@ -117,7 +123,9 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
- checkRandomData(random(), new FactoryAnalyzer(assertingTokenizer, null, factory), 20, 20, false, false);
+ Analyzer a = new FactoryAnalyzer(assertingTokenizer, null, factory);
+ checkRandomData(random(), a, 20, 20, false, false);
+ a.close();
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
index e3f88802633..03746bb3608 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
@@ -19,6 +19,7 @@ package org.apache.lucene.analysis.core;
import java.io.StringReader;
+import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -33,23 +34,24 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
private Directory directory;
- private IndexSearcher searcher;
private IndexReader reader;
+ private Analyzer analyzer;
@Override
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new SimpleAnalyzer()));
+ analyzer = new SimpleAnalyzer();
+ IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(analyzer));
Document doc = new Document();
doc.add(new StringField("partnum", "Q36", Field.Store.YES));
@@ -59,13 +61,11 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
writer.close();
reader = DirectoryReader.open(directory);
- searcher = newSearcher(reader);
}
@Override
public void tearDown() throws Exception {
- reader.close();
- directory.close();
+ IOUtils.close(analyzer, reader, directory);
super.tearDown();
}
@@ -86,7 +86,8 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
public void testMutipleDocument() throws Exception {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer()));
+ Analyzer analyzer = new KeywordAnalyzer();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
Document doc = new Document();
doc.add(new TextField("partnum", "Q36", Field.Store.YES));
writer.addDocument(doc);
@@ -112,11 +113,13 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
null,
0);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ analyzer.close();
}
// LUCENE-1441
public void testOffsets() throws Exception {
- try (TokenStream stream = new KeywordAnalyzer().tokenStream("field", new StringReader("abcd"))) {
+ try (Analyzer analyzer = new KeywordAnalyzer();
+ TokenStream stream = analyzer.tokenStream("field", new StringReader("abcd"))) {
OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
stream.reset();
assertTrue(stream.incrementToken());
@@ -129,6 +132,8 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new KeywordAnalyzer(), 1000*RANDOM_MULTIPLIER);
+ Analyzer analyzer = new KeywordAnalyzer();
+ checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+ analyzer.close();
}
}
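
Reviewer note: the testOffsets() change above is the two-resource form of try-with-resources: the analyzer and the token stream it produced are declared in one header and closed in reverse order (stream first, then analyzer) on every exit path. A self-contained sketch:

    import java.io.Closeable;

    public class TwoResourceDemo {
      static Closeable named(String name) {
        return () -> System.out.println("closed " + name);
      }

      public static void main(String[] args) throws Exception {
        try (Closeable analyzer = named("analyzer");
             Closeable stream = named("stream")) { // may reference `analyzer`
          System.out.println("consuming stream");
        }
        // output order: consuming stream, closed stream, closed analyzer
      }
    }
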
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
index ced8b729f96..7b6ee171bcf 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
@@ -901,16 +901,17 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
int numIterations = TEST_NIGHTLY ? atLeast(20) : 3;
Random random = random();
for (int i = 0; i < numIterations; i++) {
- MockRandomAnalyzer a = new MockRandomAnalyzer(random.nextLong());
- if (VERBOSE) {
- System.out.println("Creating random analyzer:" + a);
- }
- try {
- checkRandomData(random, a, 500*RANDOM_MULTIPLIER, 20, false,
- false /* We already validate our own offsets... */);
- } catch (Throwable e) {
- System.err.println("Exception from random analyzer: " + a);
- throw e;
+ try (MockRandomAnalyzer a = new MockRandomAnalyzer(random.nextLong())) {
+ if (VERBOSE) {
+ System.out.println("Creating random analyzer:" + a);
+ }
+ try {
+ checkRandomData(random, a, 500*RANDOM_MULTIPLIER, 20, false,
+ false /* We already validate our own offsets... */);
+ } catch (Throwable e) {
+ System.err.println("Exception from random analyzer: " + a);
+ throw e;
+ }
}
}
}
@@ -920,16 +921,17 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
int numIterations = TEST_NIGHTLY ? atLeast(20) : 3;
Random random = random();
for (int i = 0; i < numIterations; i++) {
- MockRandomAnalyzer a = new MockRandomAnalyzer(random.nextLong());
- if (VERBOSE) {
- System.out.println("Creating random analyzer:" + a);
- }
- try {
- checkRandomData(random, a, 50*RANDOM_MULTIPLIER, 80, false,
- false /* We already validate our own offsets... */);
- } catch (Throwable e) {
- System.err.println("Exception from random analyzer: " + a);
- throw e;
+ try (MockRandomAnalyzer a = new MockRandomAnalyzer(random.nextLong())) {
+ if (VERBOSE) {
+ System.out.println("Creating random analyzer:" + a);
+ }
+ try {
+ checkRandomData(random, a, 50*RANDOM_MULTIPLIER, 80, false,
+ false /* We already validate our own offsets... */);
+ } catch (Throwable e) {
+ System.err.println("Exception from random analyzer: " + a);
+ throw e;
+ }
}
}
}
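
Reviewer note: the TestRandomChains hunks above nest the old diagnose-and-rethrow try/catch inside a try-with-resources, so the failing analyzer is still reported on stderr and then closed while the exception unwinds. The control flow, sketched with a plain Closeable:

    import java.io.Closeable;

    public class DiagnoseAndCloseDemo {
      public static void main(String[] args) {
        try (Closeable a = () -> System.out.println("analyzer closed")) {
          try {
            throw new RuntimeException("simulated random-chain failure");
          } catch (Throwable e) {
            System.err.println("Exception from random analyzer: " + a);
            throw e; // precise rethrow; close() still runs during unwinding
          }
        } catch (Exception e) {
          System.out.println("caught after close: " + e.getMessage());
        }
      }
    }
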
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
index de8b061f070..36b5d416f2d 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
@@ -30,7 +30,7 @@ import java.util.HashSet;
public class TestStopAnalyzer extends BaseTokenStreamTestCase {
- private StopAnalyzer stop = new StopAnalyzer();
+ private StopAnalyzer stop;
private Set