From 8dbd6e2870ac9b5bcee1ba6a0e875d550cb8d8cb Mon Sep 17 00:00:00 2001
From: Uwe Schindler
Date: Wed, 10 Feb 2010 13:35:57 +0000
Subject: [PATCH] LUCENE-2248: Change core tests to use a global Version constant

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@908496 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES.txt                                   |   4 +
 src/test/org/apache/lucene/TestDemo.java      |   5 +-
 .../lucene/TestMergeSchedulerExternal.java    |   3 +-
 src/test/org/apache/lucene/TestSearch.java    |   5 +-
 .../lucene/TestSearchForDuplicates.java       |   8 +-
 .../lucene/TestSnapshotDeletionPolicy.java    |   8 +-
 .../analysis/TestASCIIFoldingFilter.java      |   6 +-
 .../apache/lucene/analysis/TestAnalyzers.java |  23 +-
 .../analysis/TestCachingTokenFilter.java      |   3 +-
 .../lucene/analysis/TestCharArrayMap.java     |   9 +-
 .../lucene/analysis/TestCharArraySet.java     |  46 ++--
 .../lucene/analysis/TestCharTokenizers.java   |  18 +-
 .../analysis/TestISOLatin1AccentFilter.java   |   4 +-
 .../lucene/analysis/TestKeywordAnalyzer.java  |   7 +-
 .../TestKeywordMarkerTokenFilter.java         |   9 +-
 .../lucene/analysis/TestLengthFilter.java     |   4 +-
 .../analysis/TestMappingCharFilter.java       |  24 +-
 .../analysis/TestPerFieldAnalzyerWrapper.java |   5 +-
 .../lucene/analysis/TestPorterStemFilter.java |   6 +-
 .../lucene/analysis/TestStandardAnalyzer.java |  10 +-
 .../lucene/analysis/TestStopAnalyzer.java     |   4 +-
 .../lucene/analysis/TestStopFilter.java       |  22 +-
 .../analysis/TestTeeSinkTokenFilter.java      |  20 +-
 .../lucene/collation/CollationTestBase.java   |   7 +-
 .../lucene/document/TestBinaryDocument.java   |   4 +-
 .../apache/lucene/document/TestDocument.java  |   4 +-
 .../org/apache/lucene/index/DocHelper.java    |   4 +-
 .../index/TestAddIndexesNoOptimize.java       |   8 +-
 .../apache/lucene/index/TestAtomicUpdate.java |   2 +-
 .../index/TestBackwardsCompatibility.java     |  17 +-
 .../apache/lucene/index/TestCheckIndex.java   |   3 +-
 .../index/TestConcurrentMergeScheduler.java   |   4 +-
 .../org/apache/lucene/index/TestCrash.java    |   3 +-
 .../lucene/index/TestDeletionPolicy.java      |  41 ++-
 .../lucene/index/TestDirectoryReader.java     |   2 +-
 src/test/org/apache/lucene/index/TestDoc.java |   5 +-
 .../lucene/index/TestDocumentWriter.java      |  13 +-
 .../apache/lucene/index/TestFieldsReader.java |   7 +-
 .../lucene/index/TestFilterIndexReader.java   |   4 +-
 .../lucene/index/TestIndexFileDeleter.java    |   6 +-
 .../apache/lucene/index/TestIndexReader.java  | 105 ++++---
 .../lucene/index/TestIndexReaderClone.java    |   5 +-
 .../index/TestIndexReaderCloneNorms.java      |   2 +-
 .../lucene/index/TestIndexReaderReopen.java   |  17 +-
 .../apache/lucene/index/TestIndexWriter.java  | 257 +++++++++---------
 .../lucene/index/TestIndexWriterDelete.java   |  27 +-
 .../index/TestIndexWriterExceptions.java      |   5 +-
 .../index/TestIndexWriterLockRelease.java     |   4 +-
 .../index/TestIndexWriterMergePolicy.java     |  19 +-
 .../lucene/index/TestIndexWriterMerging.java  |   4 +-
 .../lucene/index/TestIndexWriterReader.java   |  41 ++-
 .../org/apache/lucene/index/TestLazyBug.java  |   3 +-
 .../lucene/index/TestLazyProxSkipping.java    |   5 +-
 .../lucene/index/TestMultiLevelSkipList.java  |   3 +-
 .../index/TestNRTReaderWithThreads.java       |   3 +-
 .../org/apache/lucene/index/TestNorms.java    |   2 +-
 .../org/apache/lucene/index/TestOmitTf.java   |  10 +-
 .../lucene/index/TestParallelReader.java      |  16 +-
 .../index/TestParallelReaderEmptyIndex.java   |  13 +-
 .../lucene/index/TestParallelTermEnum.java    |   6 +-
 .../org/apache/lucene/index/TestPayloads.java |   5 +-
 .../lucene/index/TestSegmentTermDocs.java     |   3 +-
 .../lucene/index/TestSegmentTermEnum.java     |   8 +-
 .../lucene/index/TestStressIndexing.java      |   2 +-
 .../lucene/index/TestStressIndexing2.java     |   7 +-
 .../lucene/index/TestThreadedOptimize.java    |   3 +-
 .../lucene/index/TestTransactionRollback.java |   8 +-
 .../apache/lucene/index/TestTransactions.java |   6 +-
 .../lucene/queryParser/TestMultiAnalyzer.java |  12 +-
 .../TestMultiFieldQueryParser.java            |  51 ++--
 .../lucene/queryParser/TestQueryParser.java   |  65 +++--
 .../lucene/search/BaseTestRangeFilter.java    |   4 +-
 .../org/apache/lucene/search/QueryUtils.java  |   4 +-
 .../apache/lucene/search/TestBoolean2.java    |   9 +-
 .../search/TestBooleanMinShouldMatch.java     |   3 +-
 .../apache/lucene/search/TestBooleanOr.java   |   2 +-
 .../lucene/search/TestBooleanPrefixQuery.java |   4 +-
 .../lucene/search/TestBooleanQuery.java       |   3 +-
 .../lucene/search/TestBooleanScorer.java      |   3 +-
 .../search/TestCachingWrapperFilter.java      |   4 +-
 .../lucene/search/TestCustomSearcherSort.java |   2 +-
 .../apache/lucene/search/TestDateFilter.java  |   5 +-
 .../apache/lucene/search/TestDateSort.java    |   5 +-
 .../search/TestDisjunctionMaxQuery.java       |   3 +-
 .../apache/lucene/search/TestDocBoost.java    |   3 +-
 .../apache/lucene/search/TestDocIdSet.java    |   3 +-
 .../search/TestElevationComparator.java       |   4 +-
 .../lucene/search/TestExplanations.java       |   5 +-
 .../apache/lucene/search/TestFieldCache.java  |   4 +-
 .../search/TestFieldCacheRangeFilter.java     |   3 +-
 .../lucene/search/TestFilteredQuery.java      |   4 +-
 .../lucene/search/TestFilteredSearch.java     |   6 +-
 .../apache/lucene/search/TestFuzzyQuery.java  |  11 +-
 .../lucene/search/TestMatchAllDocsQuery.java  |   5 +-
 .../lucene/search/TestMultiPhraseQuery.java   |   8 +-
 .../lucene/search/TestMultiSearcher.java      |  12 +-
 .../search/TestMultiSearcherRanking.java      |  10 +-
 .../search/TestMultiTermConstantScore.java    |   8 +-
 .../search/TestMultiThreadTermVectors.java    |   3 +-
 .../TestMultiValuedNumericRangeQuery.java     |   3 +-
 .../org/apache/lucene/search/TestNot.java     |   5 +-
 .../search/TestNumericRangeQuery32.java       |   3 +-
 .../search/TestNumericRangeQuery64.java       |   3 +-
 .../lucene/search/TestPhrasePrefixQuery.java  |   3 +-
 .../apache/lucene/search/TestPhraseQuery.java |  12 +-
 .../lucene/search/TestPositionIncrement.java  |  12 +-
 .../lucene/search/TestPrefixFilter.java       |   3 +-
 .../search/TestPrefixInBooleanQuery.java      |   4 +-
 .../apache/lucene/search/TestPrefixQuery.java |   3 +-
 .../lucene/search/TestQueryTermVector.java    |   3 +-
 .../lucene/search/TestQueryWrapperFilter.java |   2 +-
 .../apache/lucene/search/TestScorerPerf.java  |   6 +-
 .../org/apache/lucene/search/TestSetNorm.java |   3 +-
 .../apache/lucene/search/TestSimilarity.java  |   4 +-
 .../lucene/search/TestSimpleExplanations.java |   7 +-
 .../lucene/search/TestSloppyPhraseQuery.java  |   4 +-
 .../org/apache/lucene/search/TestSort.java    |   5 +-
 .../lucene/search/TestSpanQueryFilter.java    |   3 +-
 .../lucene/search/TestTermRangeFilter.java    |   5 +-
 .../lucene/search/TestTermRangeQuery.java     |   6 +-
 .../apache/lucene/search/TestTermScorer.java  |   3 +-
 .../apache/lucene/search/TestTermVectors.java |  11 +-
 .../apache/lucene/search/TestThreadSafe.java  |   3 +-
 .../search/TestTimeLimitingCollector.java     |   5 +-
 .../apache/lucene/search/TestWildcard.java    |   7 +-
 .../search/function/FunctionTestSetup.java    |   2 +-
 .../search/function/TestCustomScoreQuery.java |   5 +-
 .../lucene/search/payloads/PayloadHelper.java |   4 +-
 .../search/payloads/TestPayloadNearQuery.java |   3 +-
 .../search/payloads/TestPayloadTermQuery.java |   3 +-
 .../lucene/search/spans/TestBasics.java       |   3 +-
 .../spans/TestFieldMaskingSpanQuery.java      |   3 +-
 .../search/spans/TestNearSpansOrdered.java    |   5 +-
 .../lucene/search/spans/TestPayloadSpans.java |   5 +-
 .../apache/lucene/search/spans/TestSpans.java |   6 +-
 .../search/spans/TestSpansAdvanced.java       |   2 +-
 .../search/spans/TestSpansAdvanced2.java      |   2 +-
 .../lucene/store/TestBufferedIndexInput.java  |   3 +-
 .../lucene/store/TestFileSwitchDirectory.java |   3 +-
 .../apache/lucene/store/TestLockFactory.java  |  15 +-
 .../apache/lucene/store/TestRAMDirectory.java |   6 +-
 .../apache/lucene/store/TestWindowsMMap.java  |   2 +-
 .../apache/lucene/util/LuceneTestCase.java    |   2 +
 .../apache/lucene/util/LuceneTestCaseJ4.java  |   7 +-
 .../lucene/util/TestCharacterUtils.java       |  11 +-
 .../util/TestFieldCacheSanityChecker.java     |   4 +-
 146 files changed, 669 insertions(+), 799 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index fa4738ac934..ac0a5212e4c 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -233,6 +233,10 @@ Test Cases
 * LUCENE-2207, LUCENE-2219: Improve BaseTokenStreamTestCase to check if end()
   is implemented correctly. (Koji Sekiguchi, Robert Muir)
 
+* LUCENE-2248, LUCENE-2251: Refactor tests to not use Version.LUCENE_CURRENT,
+  but instead use a global static value from LuceneTestCase(J4), that
+  contains the release version. (Uwe Schindler, Simon Willnauer)
+
 ======================= Release 3.0.0 2009-11-25 =======================
 
 Changes in backwards compatibility policy
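The constant referenced throughout the hunks below lives in the shared test infrastructure rather than in each test. As a rough sketch of the shape this takes (the LuceneTestCase/LuceneTestCaseJ4 hunks are not included in this excerpt, so the exact field layout and the concrete version value are assumptions; per the CHANGES entry above the value tracks the release version, which was 3.1 on trunk at the time):

    // Hypothetical sketch of the additions to LuceneTestCaseJ4.java and
    // LuceneTestCase.java implied by the CHANGES entry above.
    public class LuceneTestCaseJ4 {
      /** Version to pass to analyzers, tokenizers and query parsers in tests;
       *  bumped once per release instead of relying on Version.LUCENE_CURRENT. */
      public static final Version TEST_VERSION_CURRENT = Version.LUCENE_31;
    }

    public abstract class LuceneTestCase extends TestCase {
      /** Mirror of the J4 constant, so JUnit3-style tests inherit it too. */
      public static final Version TEST_VERSION_CURRENT = LuceneTestCaseJ4.TEST_VERSION_CURRENT;
    }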
diff --git a/src/test/org/apache/lucene/TestDemo.java b/src/test/org/apache/lucene/TestDemo.java
index f74f63c649e..0e0becefcbb 100644
--- a/src/test/org/apache/lucene/TestDemo.java
+++ b/src/test/org/apache/lucene/TestDemo.java
@@ -32,7 +32,6 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 /**
  * A very simple demo used in the API documentation (src/java/overview.html).
@@ -44,7 +43,7 @@ public class TestDemo extends LuceneTestCase {
 
   public void testDemo() throws IOException, ParseException {
 
-    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
     // Store the index in memory:
     Directory directory = new RAMDirectory();
@@ -62,7 +61,7 @@ public class TestDemo extends LuceneTestCase {
     // Now search the index:
     IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
     // Parse a simple query that searches for "text":
-    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fieldname", analyzer);
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
     Query query = parser.parse("text");
     ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
     assertEquals(1, hits.length);

diff --git a/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
index a135fc06860..8c4b80b9094 100644
--- a/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
+++ b/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
@@ -18,7 +18,6 @@ package org.apache.lucene;
  */
 
 import java.io.IOException;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.Directory;
@@ -96,7 +95,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     MyMergeScheduler ms = new MyMergeScheduler();
     writer.setMergeScheduler(ms);
     writer.setMaxBufferedDocs(2);

diff --git a/src/test/org/apache/lucene/TestSearch.java b/src/test/org/apache/lucene/TestSearch.java
index dad5d634dcc..ca85c3fcd02 100644
--- a/src/test/org/apache/lucene/TestSearch.java
+++ b/src/test/org/apache/lucene/TestSearch.java
@@ -22,7 +22,6 @@ import java.io.PrintWriter;
 import java.io.StringWriter;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
 
@@ -74,7 +73,7 @@ public class TestSearch extends LuceneTestCase {
       throws Exception {
     Directory directory = new RAMDirectory();
-    Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter writer = new IndexWriter(directory, analyzer, true,
                                          IndexWriter.MaxFieldLength.LIMITED);
@@ -108,7 +107,7 @@ public class TestSearch extends LuceneTestCase {
     };
     ScoreDoc[] hits = null;
 
-    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer);
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer);
     parser.setPhraseSlop(4);
     for (int j = 0; j < queries.length; j++) {
       Query query = parser.parse(queries[j]);
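Because TEST_VERSION_CURRENT is a static inherited from LuceneTestCase, the migrated call sites above compile without any org.apache.lucene.util.Version import, which is why each file diff also drops that import. A minimal sketch of the resulting pattern (hypothetical test class, not part of this patch):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.SimpleAnalyzer;
    import org.apache.lucene.util.LuceneTestCase;

    public class TestVersionConstantUsage extends LuceneTestCase {
      public void testAnalyzerSeesSharedVersion() {
        // TEST_VERSION_CURRENT is inherited from LuceneTestCase, so no
        // Version import or qualification is needed in the test itself.
        Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
        assertNotNull(analyzer);
      }
    }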
diff --git a/src/test/org/apache/lucene/TestSearchForDuplicates.java b/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 0c6429ee0c7..5f0a13dff1c 100644
--- a/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -27,8 +27,6 @@ import org.apache.lucene.analysis.*;
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.queryParser.*;
-import org.apache.lucene.util.Version;
-
 import org.apache.lucene.util.LuceneTestCase;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
@@ -79,7 +77,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
   private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception {
     Directory directory = new RAMDirectory();
-    Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter writer = new IndexWriter(directory, analyzer, true,
                                          IndexWriter.MaxFieldLength.LIMITED);
@@ -98,7 +96,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
     // try a search without OR
     Searcher searcher = new IndexSearcher(directory, true);
 
-    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
 
     Query query = parser.parse(HIGH_PRIORITY);
     out.println("Query: " + query.toString(PRIORITY_FIELD));
@@ -113,7 +111,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
     searcher = new IndexSearcher(directory, true);
     hits = null;
 
-    parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+    parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
 
     query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
     out.println("Query: " + query.toString(PRIORITY_FIELD));

diff --git a/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java b/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
index 4f7a8b94d64..6ce929274f4 100644
--- a/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
+++ b/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
@@ -67,7 +67,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     Directory dir = new MockRAMDirectory();
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     // Force frequent flushes
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
@@ -83,7 +83,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     writer.close();
     copyFiles(dir, cp);
-    writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     copyFiles(dir, cp);
     for(int i=0;i<7;i++) {
       writer.addDocument(doc);
@@ -95,7 +95,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     writer.close();
     copyFiles(dir, cp);
     dp.release();
-    writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.close();
     try {
       copyFiles(dir, cp);
@@ -111,7 +111,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     final long stopTime = System.currentTimeMillis() + 1000;
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     // Force frequent flushes
     writer.setMaxBufferedDocs(2);

diff --git a/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java b/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
index 2df053239af..d5c9594a5d8 100644
--- a/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
@@ -18,8 +18,6 @@ package org.apache.lucene.analysis;
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
 import java.io.StringReader;
 import java.util.List;
 import java.util.ArrayList;
@@ -29,7 +27,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
 
   // testLain1Accents() is a copy of TestLatin1AccentFilter.testU().
   public void testLatin1Accents() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader
       ("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ"
       +" Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij"
       +" ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
@@ -1890,7 +1888,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
       expectedOutputTokens.add(expected.toString());
     }
 
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(inputText.toString()));
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(inputText.toString()));
     ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
     Iterator expectedIter = expectedOutputTokens.iterator();

diff --git a/src/test/org/apache/lucene/analysis/TestAnalyzers.java b/src/test/org/apache/lucene/analysis/TestAnalyzers.java
index 947650a7134..1d7d19a8f99 100644
--- a/src/test/org/apache/lucene/analysis/TestAnalyzers.java
+++ b/src/test/org/apache/lucene/analysis/TestAnalyzers.java
@@ -26,7 +26,6 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Version;
 
 public class TestAnalyzers extends BaseTokenStreamTestCase {
 
@@ -35,7 +34,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
   }
 
   public void testSimple() throws Exception {
-    Analyzer a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
     assertAnalyzesTo(a, "foo bar FOO BAR",
                      new String[] { "foo", "bar", "foo", "bar" });
     assertAnalyzesTo(a, "foo bar . FOO <> BAR",
@@ -55,7 +54,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
   }
 
   public void testNull() throws Exception {
-    Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
     assertAnalyzesTo(a, "foo bar FOO BAR",
                      new String[] { "foo", "bar", "FOO", "BAR" });
     assertAnalyzesTo(a, "foo bar . FOO <> BAR",
FOO <> BAR", @@ -75,7 +74,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase { } public void testStop() throws Exception { - Analyzer a = new StopAnalyzer(Version.LUCENE_CURRENT); + Analyzer a = new StopAnalyzer(TEST_VERSION_CURRENT); assertAnalyzesTo(a, "foo bar FOO BAR", new String[] { "foo", "bar", "foo", "bar" }); assertAnalyzesTo(a, "foo a bar such FOO THESE BAR", @@ -97,11 +96,11 @@ public class TestAnalyzers extends BaseTokenStreamTestCase { public void testPayloadCopy() throws IOException { String s = "how now brown cow"; TokenStream ts; - ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s)); + ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s)); ts = new PayloadSetter(ts); verifyPayload(ts); - ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s)); + ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s)); ts = new PayloadSetter(ts); verifyPayload(ts); } @@ -122,12 +121,12 @@ public class TestAnalyzers extends BaseTokenStreamTestCase { private static class MyStandardAnalyzer extends StandardAnalyzer { public MyStandardAnalyzer() { - super(org.apache.lucene.util.Version.LUCENE_CURRENT); + super(TEST_VERSION_CURRENT); } @Override public TokenStream tokenStream(String field, Reader reader) { - return new WhitespaceAnalyzer(Version.LUCENE_CURRENT).tokenStream(field, reader); + return new WhitespaceAnalyzer(TEST_VERSION_CURRENT).tokenStream(field, reader); } } @@ -144,8 +143,8 @@ public class TestAnalyzers extends BaseTokenStreamTestCase { @Override public TokenStream tokenStream(String fieldName, Reader reader) { - return new LowerCaseFilter(Version.LUCENE_CURRENT, - new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader)); + return new LowerCaseFilter(TEST_VERSION_CURRENT, + new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader)); } } @@ -192,9 +191,9 @@ public class TestAnalyzers extends BaseTokenStreamTestCase { public void testLowerCaseFilterLowSurrogateLeftover() throws IOException { // test if the limit of the termbuffer is correctly used with supplementary // chars - WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, + WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("BogustermBogusterm\udc16")); - LowerCaseFilter filter = new LowerCaseFilter(Version.LUCENE_CURRENT, + LowerCaseFilter filter = new LowerCaseFilter(TEST_VERSION_CURRENT, tokenizer); assertTokenStreamContents(filter, new String[] {"bogustermbogusterm\udc16"}); filter.reset(); diff --git a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java index 9f4af94cef4..30602a7e4d1 100644 --- a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java +++ b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java @@ -31,14 +31,13 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.TermPositions; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.Version; public class TestCachingTokenFilter extends BaseTokenStreamTestCase { private String[] tokens = new String[] {"term1", "term2", "term3", "term2"}; public void testCaching() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), 
     Document doc = new Document();
     TokenStream stream = new TokenStream() {
       private int index = 0;

diff --git a/src/test/org/apache/lucene/analysis/TestCharArrayMap.java b/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
index 124fe9c3440..5e5578a3dd2 100644
--- a/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
+++ b/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
@@ -19,13 +19,12 @@ package org.apache.lucene.analysis;
 
 import java.util.*;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 public class TestCharArrayMap extends LuceneTestCase {
   Random r = newRandom();
 
   public void doRandom(int iter, boolean ignoreCase) {
-    CharArrayMap map = new CharArrayMap(Version.LUCENE_CURRENT, 1, ignoreCase);
+    CharArrayMap map = new CharArrayMap(TEST_VERSION_CURRENT, 1, ignoreCase);
     HashMap hmap = new HashMap();
 
     char[] key;
@@ -63,7 +62,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testMethods() {
-    CharArrayMap cm = new CharArrayMap(Version.LUCENE_CURRENT, 2, false);
+    CharArrayMap cm = new CharArrayMap(TEST_VERSION_CURRENT, 2, false);
     HashMap hm = new HashMap();
     hm.put("foo",1);
     hm.put("bar",2);
@@ -131,7 +130,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testModifyOnUnmodifiable(){
-    CharArrayMap map = new CharArrayMap(Version.LUCENE_CURRENT, 2, false);
+    CharArrayMap map = new CharArrayMap(TEST_VERSION_CURRENT, 2, false);
     map.put("foo",1);
     map.put("bar",2);
     final int size = map.size();
@@ -228,7 +227,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testToString() {
-    CharArrayMap cm = new CharArrayMap(Version.LUCENE_CURRENT, Collections.singletonMap("test",1), false);
+    CharArrayMap cm = new CharArrayMap(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
     assertEquals("[test]",cm.keySet().toString());
     assertEquals("[1]",cm.values().toString());
     assertEquals("[test=1]",cm.entrySet().toString());

diff --git a/src/test/org/apache/lucene/analysis/TestCharArraySet.java b/src/test/org/apache/lucene/analysis/TestCharArraySet.java
index f2efbd1be93..bd46543c839 100755
--- a/src/test/org/apache/lucene/analysis/TestCharArraySet.java
+++ b/src/test/org/apache/lucene/analysis/TestCharArraySet.java
@@ -41,7 +41,7 @@ public class TestCharArraySet extends LuceneTestCase {
 
   public void testRehash() throws Exception {
-    CharArraySet cas = new CharArraySet(Version.LUCENE_CURRENT, 0, true);
+    CharArraySet cas = new CharArraySet(TEST_VERSION_CURRENT, 0, true);
     for(int i=0;i
 
     // would not hit any element of the CAS and therefor never call
     // remove() on the iterator
     try{
-      set.removeAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
+      set.removeAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
       fail("Modified unmodifiable set");
     }catch (UnsupportedOperationException e) {
       // expected
@@ -158,7 +158,7 @@ public class TestCharArraySet extends LuceneTestCase {
     }
 
     try{
-      set.retainAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(NOT_IN_SET), true));
+      set.retainAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(NOT_IN_SET), true));
       fail("Modified unmodifiable set");
     }catch (UnsupportedOperationException e) {
       // expected
@@ -179,7 +179,7 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testUnmodifiableSet(){
-    CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10,true);
     set.addAll(Arrays.asList(TEST_STOP_WORDS));
     set.add(Integer.valueOf(1));
     final int size = set.size();
@@ -209,7 +209,7 @@ public class TestCharArraySet extends LuceneTestCase {
         "\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
     String[] lowerArr = new String[] {"abc\ud801\udc44",
         "\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
-    CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true);
     for (String upper : upperArr) {
       set.add(upper);
     }
@@ -217,7 +217,7 @@ public class TestCharArraySet extends LuceneTestCase {
       assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
       assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
     }
-    set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), false);
+    set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), false);
     for (String upper : upperArr) {
      set.add(upper);
     }
@@ -235,7 +235,7 @@ public class TestCharArraySet extends LuceneTestCase {
     String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
         "\uD800efg", "\uD800\ud801\udc44b" };
-    CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays
         .asList(TEST_STOP_WORDS), true);
     for (String upper : upperArr) {
       set.add(upper);
@@ -244,7 +244,7 @@ public class TestCharArraySet extends LuceneTestCase {
       assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
       assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
     }
-    set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS),
+    set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS),
         false);
     for (String upper : upperArr) {
       set.add(upper);
@@ -328,8 +328,8 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testCopyCharArraySetBWCompat() {
-    CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
-    CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+    CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
 
     List stopwords = Arrays.asList(TEST_STOP_WORDS);
     List stopwordsUpper = new ArrayList();
@@ -375,8 +375,8 @@ public class TestCharArraySet extends LuceneTestCase {
    * Test the static #copy() function with a CharArraySet as a source
    */
   public void testCopyCharArraySet() {
-    CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
-    CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+    CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
 
     List stopwords = Arrays.asList(TEST_STOP_WORDS);
     List stopwordsUpper = new ArrayList();
@@ -388,8 +388,8 @@ public class TestCharArraySet extends LuceneTestCase {
     setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
     setCaseSensitive.add(Integer.valueOf(1));
 
-    CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, setIngoreCase);
-    CharArraySet copyCaseSens = CharArraySet.copy(Version.LUCENE_CURRENT, setCaseSensitive);
+    CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
+    CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
 
     assertEquals(setIngoreCase.size(), copy.size());
     assertEquals(setCaseSensitive.size(), copy.size());
@@ -431,7 +431,7 @@ public class TestCharArraySet extends LuceneTestCase {
     }
     set.addAll(Arrays.asList(TEST_STOP_WORDS));
 
-    CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, set);
+    CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, set);
 
     assertEquals(set.size(), copy.size());
     assertEquals(set.size(), copy.size());
@@ -461,7 +461,7 @@ public class TestCharArraySet extends LuceneTestCase {
    */
   public void testCopyEmptySet() {
     assertSame(CharArraySet.EMPTY_SET,
-        CharArraySet.copy(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET));
+        CharArraySet.copy(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET));
   }
 
   /**
@@ -483,7 +483,7 @@ public class TestCharArraySet extends LuceneTestCase {
    * Test for NPE
    */
   public void testContainsWithNull() {
-    CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
     try {
       set.contains((char[]) null, 0, 10);
       fail("null value must raise NPE");
@@ -506,7 +506,7 @@ public class TestCharArraySet extends LuceneTestCase {
     assertTrue("in 3.0 version, iterator should be CharArraySetIterator",
         ((Iterator) CharArraySet.copy(Version.LUCENE_30, hset).iterator()) instanceof CharArraySet.CharArraySetIterator);
 
-    CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, hset);
+    CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, hset);
 
     assertFalse("in current version, iterator should not be CharArraySetIterator",
         ((Iterator) set.iterator()) instanceof CharArraySet.CharArraySetIterator);
@@ -525,7 +525,7 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testToString() {
-    CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, Collections.singleton("test"));
+    CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, Collections.singleton("test"));
     assertEquals("[test]", set.toString());
     set.add("test2");
     assertTrue(set.toString().contains(", "));
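Most hunks in TestCharArraySet are a plain constant swap, but the call sites that previously pinned Version.LUCENE_31 are worth noting: matchVersion is behaviorally significant for this class, since only 3.1+ sets fold supplementary (non-BMP) characters correctly when ignoreCase is enabled, while the explicit Version.LUCENE_30 call sites that verify old behavior stay untouched. A condensed, hypothetical version of the supplementary-character check above:

    // Hypothetical condensation of the testSupplementaryChars hunk above:
    // with a 3.1+ matchVersion, ignore-case lookup also folds non-BMP letters.
    public void testSupplementaryFoldingSketch() {
      CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
      set.add("ABC\uD801\uDC1C");                  // Deseret capital letter (U+1041C)
      assertTrue(set.contains("abc\uD801\uDC44")); // its lowercase form (U+10444)
    }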
diff --git a/src/test/org/apache/lucene/analysis/TestCharTokenizers.java b/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
index 7c578f8ceb7..c26880a135a 100644
--- a/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
+++ b/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
@@ -46,7 +46,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     // internal buffer size is 1024 make sure we have a surrogate pair right at the border
     builder.insert(1023, "\ud801\udc1c");
     LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-        Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+        TEST_VERSION_CURRENT, new StringReader(builder.toString()));
     assertTokenStreamContents(tokenizer, builder.toString().toLowerCase().split(" "));
   }
 
@@ -64,7 +64,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
       }
       builder.append("\ud801\udc1cabc");
       LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-          Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+          TEST_VERSION_CURRENT, new StringReader(builder.toString()));
       assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase()});
     }
   }
@@ -79,7 +79,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
       builder.append("A");
     }
     LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-        Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+        TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
     assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(),
         builder.toString().toLowerCase()});
   }
@@ -94,13 +94,13 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     }
     builder.append("\ud801\udc1c");
     LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-        Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+        TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
     assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(),
         builder.toString().toLowerCase()});
   }
 
   public void testLowerCaseTokenizer() throws IOException {
     StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
-    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(Version.LUCENE_CURRENT,
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT,
         reader);
     assertTokenStreamContents(tokenizer, new String[] { "tokenizer",
         "\ud801\udc44test" });
@@ -115,7 +115,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
 
   public void testWhitespaceTokenizer() throws IOException {
     StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
-    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
         reader);
     assertTokenStreamContents(tokenizer, new String[] { "Tokenizer",
         "\ud801\udc1ctest" });
@@ -132,7 +132,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
   public void testIsTokenCharCharInSubclass() {
     new TestingCharTokenizer(Version.LUCENE_30, new StringReader(""));
     try {
-      new TestingCharTokenizer(Version.LUCENE_CURRENT, new StringReader(""));
+      new TestingCharTokenizer(TEST_VERSION_CURRENT, new StringReader(""));
       fail("version 3.1 is not permitted if char based method is implemented");
     } catch (IllegalArgumentException e) {
       // expected
@@ -142,7 +142,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
   public void testNormalizeCharInSubclass() {
     new TestingCharTokenizerNormalize(Version.LUCENE_30, new StringReader(""));
     try {
-      new TestingCharTokenizerNormalize(Version.LUCENE_CURRENT,
+      new TestingCharTokenizerNormalize(TEST_VERSION_CURRENT,
           new StringReader(""));
       fail("version 3.1 is not permitted if char based method is implemented");
     } catch (IllegalArgumentException e) {
@@ -154,7 +154,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_30,
         new StringReader(""));
     try {
-      new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_CURRENT,
+      new TestingCharTokenizerNormalizeIsTokenChar(TEST_VERSION_CURRENT,
           new StringReader(""));
       fail("version 3.1 is not permitted if char based method is implemented");
     } catch (IllegalArgumentException e) {

diff --git a/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java b/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
index 92cc6695594..e424ccc06f6 100644
--- a/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
@@ -18,13 +18,11 @@ package org.apache.lucene.analysis;
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
 import java.io.StringReader;
 
 public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
   public void testU() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
     ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
     assertTermEquals("Des", filter, termAtt);

diff --git a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
index 234a98dee72..0c8cf1d258a 100644
--- a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
+++ b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
@@ -31,7 +31,6 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
 
 public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
 
@@ -43,7 +42,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
     super.setUp();
     directory = new RAMDirectory();
     IndexWriter writer = new IndexWriter(directory,
-        new SimpleAnalyzer(Version.LUCENE_CURRENT),
+        new SimpleAnalyzer(TEST_VERSION_CURRENT),
         true, IndexWriter.MaxFieldLength.LIMITED);
 
     Document doc = new Document();
@@ -57,10 +56,10 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
   }
 
   public void testPerFieldAnalyzer() throws Exception {
-    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(Version.LUCENE_CURRENT));
+    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(TEST_VERSION_CURRENT));
     analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
 
-    QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, "description", analyzer);
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, "description", analyzer);
     Query query = queryParser.parse("partnum:Q36 AND SPACE");
 
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;

diff --git a/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java b/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
index d2e16342cfb..9826c38c4ff 100644
--- a/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
@@ -7,7 +7,6 @@ import java.util.Set;
 
 import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
 import org.junit.Test;
 
 /**
@@ -34,21 +33,21 @@ public class TestKeywordMarkerTokenFilter extends BaseTokenStreamTestCase {
 
   @Test
   public void testIncrementToken() throws IOException {
-    CharArraySet set = new CharArraySet(Version.LUCENE_31, 5, true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 5, true);
     set.add("lucenefox");
     String[] output = new String[] { "the", "quick", "brown", "LuceneFox",
         "jumps" };
     assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
             "The quIck browN LuceneFox Jumps")), set)), output);
 
     Set jdkSet = new HashSet();
     jdkSet.add("LuceneFox");
     assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
             "The quIck browN LuceneFox Jumps")), jdkSet)), output);
 
     Set set2 = set;
     assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
             "The quIck browN LuceneFox Jumps")), set2)), output);
   }

diff --git a/src/test/org/apache/lucene/analysis/TestLengthFilter.java b/src/test/org/apache/lucene/analysis/TestLengthFilter.java
index 343cb87aa1e..94f4a9570a3 100644
--- a/src/test/org/apache/lucene/analysis/TestLengthFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestLengthFilter.java
@@ -18,14 +18,12 @@ package org.apache.lucene.analysis;
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
 import java.io.StringReader;
 
 public class TestLengthFilter extends BaseTokenStreamTestCase {
 
   public void testFilter() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
         new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
     LengthFilter filter = new LengthFilter(stream, 2, 6);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);

diff --git a/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java b/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
index d3fdfbc78fe..52e48a53061 100644
--- a/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
@@ -19,8 +19,6 @@ package org.apache.lucene.analysis;
 
 import java.io.StringReader;
 
-import org.apache.lucene.util.Version;
-
 public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
   NormalizeCharMap normMap;
@@ -60,55 +58,55 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
   public void testNothingChange() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "x" ) );
-    TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"x"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "h" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"i"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to2() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "j" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"jj"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to3() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "k" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"kkk"}, new int[]{0}, new int[]{1});
   }
 
   public void test2to4() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "ll" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"llll"}, new int[]{0}, new int[]{2});
   }
 
   public void test2to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "aa" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"a"}, new int[]{0}, new int[]{2});
   }
 
   public void test3to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "bbb" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"b"}, new int[]{0}, new int[]{3});
   }
 
   public void test4to2() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "cccc" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"cc"}, new int[]{0}, new int[]{4});
   }
 
   public void test5to0() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "empty" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[0]);
   }
 
@@ -132,7 +130,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
   //
   public void testTokenStream() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, CharReader.get( new StringReader( "h i j k ll cccc bbb aa" ) ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts,
       new String[]{"i","i","jj","kkk","llll","cc","b","a"},
       new int[]{0,2,4,6,8,11,16,20},
@@ -153,7 +151,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
   public void testChained() throws Exception {
     CharStream cs = new MappingCharFilter( normMap,
         new MappingCharFilter( normMap, CharReader.get( new StringReader( "aaaa ll h" ) ) ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts,
       new String[]{"a","llllllll","i"},
       new int[]{0,5,8},
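The MappingCharFilter hunks change only the tokenizer's version argument, but the assertions around them document the filter's offset correction: when a mapping such as "ll" -> "llll" grows the text, the token carries the mapped term while its start/end offsets still span the two original input characters. A sketch of that setup (the normMap contents are built in a part of the file this patch does not touch, so the mapping below is an assumption inferred from the test2to4() assertions):

    // Assumed mapping, consistent with test2to4() above: "ll" -> "llll".
    NormalizeCharMap normMap = new NormalizeCharMap();
    normMap.add("ll", "llll");
    CharStream cs = new MappingCharFilter(normMap, new StringReader("ll"));
    TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, cs);
    // Term text is the mapped form; offsets (0,2) still point into the input.
    assertTokenStreamContents(ts, new String[]{"llll"}, new int[]{0}, new int[]{2});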
diff --git a/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java b/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
index 34296093841..be5fcd62ec7 100644
--- a/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
+++ b/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
@@ -3,7 +3,6 @@ package org.apache.lucene.analysis;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -26,8 +25,8 @@ public class TestPerFieldAnalzyerWrapper extends BaseTokenStreamTestCase {
   public void testPerField() throws Exception {
     String text = "Qwerty";
     PerFieldAnalyzerWrapper analyzer =
-        new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
-    analyzer.addAnalyzer("special", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+        new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+    analyzer.addAnalyzer("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));
 
     TokenStream tokenStream = analyzer.tokenStream("field",
         new StringReader(text));

diff --git a/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java b/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
index 17ca0d6d7e0..30493d19f8e 100644
--- a/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
@@ -25,8 +25,6 @@ import java.io.InputStreamReader;
 import java.io.StringReader;
 import java.util.zip.ZipFile;
 
-import org.apache.lucene.util.Version;
-
 /**
  * Test the PorterStemFilter with Martin Porter's test data.
  */
@@ -60,9 +58,9 @@ public class TestPorterStemFilter extends BaseTokenStreamTestCase {
   }
 
   public void testWithKeywordAttribute() throws IOException {
-    CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
     set.add("yourselves");
-    Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("yourselves yours"));
+    Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("yourselves yours"));
     TokenStream filter = new PorterStemFilter(new KeywordMarkerTokenFilter(tokenizer, set));
     assertTokenStreamContents(filter, new String[] {"yourselves", "your"});
   }

diff --git a/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java b/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
index 878a7a42650..e1d03f8c0ff 100644
--- a/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
+++ b/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
@@ -23,16 +23,16 @@ import org.apache.lucene.util.Version;
 
 public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
 
-  private Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+  private Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
   public void testMaxTermLength() throws Exception {
-    StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
     sa.setMaxTokenLength(5);
     assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "xy", "z"});
   }
 
   public void testMaxTermLength2() throws Exception {
-    StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
     assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "toolong", "xy", "z"});
     sa.setMaxTokenLength(5);
@@ -96,7 +96,7 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
 
   public void testLucene1140() throws Exception {
     try {
-      StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+      StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
       assertAnalyzesTo(analyzer, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "<HOST>" });
     } catch (NullPointerException e) {
       fail("Should not throw an NPE and it did");
     }
@@ -106,7 +106,7 @@ public void testDomainNames() throws Exception {
     // Current lucene should not show the bug
-    StandardAnalyzer a2 = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer a2 = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
     // domain names
     assertAnalyzesTo(a2, "www.nutch.org", new String[]{"www.nutch.org"});

diff --git a/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java b/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
index e7171c7dc90..18e84305342 100644
--- a/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
+++ b/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
@@ -29,7 +29,7 @@ import java.util.HashSet;
 
 public class TestStopAnalyzer extends BaseTokenStreamTestCase {
 
-  private StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_CURRENT);
+  private StopAnalyzer stop = new StopAnalyzer(TEST_VERSION_CURRENT);
   private Set inValidTokens = new HashSet();
 
   public TestStopAnalyzer(String s) {
@@ -82,7 +82,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
     stopWordsSet.add("good");
     stopWordsSet.add("test");
     stopWordsSet.add("analyzer");
-    StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_CURRENT, stopWordsSet);
+    StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
     StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
     int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1};
     TokenStream stream = newStop.tokenStream("test", reader);

diff --git a/src/test/org/apache/lucene/analysis/TestStopFilter.java b/src/test/org/apache/lucene/analysis/TestStopFilter.java
index 83ce5bf0d6b..5c9db5f686a 100644
--- a/src/test/org/apache/lucene/analysis/TestStopFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestStopFilter.java
@@ -38,7 +38,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
   public void testExactCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     Set stopWords = new HashSet(Arrays.asList("is", "the", "Time"));
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, false);
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, false);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -50,7 +50,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
   public void testIgnoreCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     Set stopWords = new HashSet(Arrays.asList( "is", "the", "Time" ));
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, true);
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, true);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -60,8 +60,8 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
   public void testStopFilt() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     String[] stopWords = new String[] { "is", "the", "Time" };
-    Set stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+    Set stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -84,14 +84,14 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
     log(sb.toString());
     String stopWords[] = a.toArray(new String[0]);
     for (int i=0; i
 
-    Set stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
+    Set stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
     // with increments
     StringReader reader = new StringReader(sb.toString());
-    StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+    StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
     doTestStopPositons(stpf,true);
     // without increments
     reader = new StringReader(sb.toString());
-    stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+    stpf = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
     doTestStopPositons(stpf,false);
     // with increments, concatenating two stop filters
     ArrayList a0 = new ArrayList();
@@ -107,12 +107,12 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
     for (int i=0; i
 
-    Set stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
-    Set stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
+    Set stopSet0 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords0);
+    Set stopSet1 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords1);
     reader = new StringReader(sb.toString());
-    StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet0); // first part of the set
+    StopFilter stpf0 = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet0); // first part of the set
     stpf0.setEnablePositionIncrements(true);
-    StopFilter stpf01 = new StopFilter(Version.LUCENE_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
+    StopFilter stpf01 = new StopFilter(TEST_VERSION_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
     doTestStopPositons(stpf01,true);
   }
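One call site above deliberately keeps an old constant, new StopFilter(Version.LUCENE_24, ...): matchVersion selects the default for enablePositionIncrements (off for 2.4 and earlier, on as of 2.9), and doTestStopPositons() exercises both defaults as well as the explicit setEnablePositionIncrements() override. A small sketch of the distinction (the default values are an assumption based on the documented 2.9 behavior change):

    Set stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, new String[]{"the"});
    TokenStream in = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("a the b"));
    StopFilter oldDefault = new StopFilter(Version.LUCENE_24, in, stopSet);
    // oldDefault.getEnablePositionIncrements() == false: "b" directly follows "a".
    in = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("a the b"));
    StopFilter newDefault = new StopFilter(TEST_VERSION_CURRENT, in, stopSet);
    // newDefault.getEnablePositionIncrements() == true: "b" keeps a position gap
    // where "the" was removed.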
doTestStopPositons(stpf01,true); } diff --git a/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java b/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java index e88cb202092..cc80f019f0c 100644 --- a/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java +++ b/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java @@ -22,8 +22,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.English; -import org.apache.lucene.util.Version; - import java.io.IOException; import java.io.StringReader; @@ -76,7 +74,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { public void testGeneral() throws IOException { - final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString()))); + final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString()))); final TokenStream sink1 = source.newSinkTokenStream(); final TokenStream sink2 = source.newSinkTokenStream(theFilter); @@ -90,7 +88,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { } public void testMultipleSources() throws Exception { - final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString()))); + final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString()))); final TeeSinkTokenFilter.SinkTokenStream dogDetector = tee1.newSinkTokenStream(dogFilter); final TeeSinkTokenFilter.SinkTokenStream theDetector = tee1.newSinkTokenStream(theFilter); final TokenStream source1 = new CachingTokenFilter(tee1); @@ -99,7 +97,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { dogDetector.addAttribute(CheckClearAttributesAttribute.class); theDetector.addAttribute(CheckClearAttributesAttribute.class); - final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer2.toString()))); + final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer2.toString()))); tee2.addSinkTokenStream(dogDetector); tee2.addSinkTokenStream(theDetector); final TokenStream source2 = tee2; @@ -111,7 +109,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { assertTokenStreamContents(dogDetector, new String[]{"Dogs", "Dogs"}); source1.reset(); - TokenStream lowerCasing = new LowerCaseFilter(Version.LUCENE_CURRENT, source1); + TokenStream lowerCasing = new LowerCaseFilter(TEST_VERSION_CURRENT, source1); String[] lowerCaseTokens = new String[tokens1.length]; for (int i = 0; i < tokens1.length; i++) lowerCaseTokens[i] = tokens1[i].toLowerCase(); @@ -133,10 +131,10 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { buffer.append(English.intToEnglish(i).toUpperCase()).append(' '); } //make sure we produce the same tokens - TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString())))); + TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString())))); TokenStream sink = 
teeStream.newSinkTokenStream(new ModuloSinkFilter(100)); teeStream.consumeAllTokens(); - TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), 100); + TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), 100); TermAttribute tfTok = stream.addAttribute(TermAttribute.class); TermAttribute sinkTok = sink.addAttribute(TermAttribute.class); for (int i=0; stream.incrementToken(); i++) { @@ -149,12 +147,12 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { int tfPos = 0; long start = System.currentTimeMillis(); for (int i = 0; i < 20; i++) { - stream = new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))); + stream = new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))); PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); while (stream.incrementToken()) { tfPos += posIncrAtt.getPositionIncrement(); } - stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), modCounts[j]); + stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), modCounts[j]); posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); while (stream.incrementToken()) { tfPos += posIncrAtt.getPositionIncrement(); @@ -166,7 +164,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { //simulate one field with one sink start = System.currentTimeMillis(); for (int i = 0; i < 20; i++) { - teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString())))); + teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString())))); sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j])); PositionIncrementAttribute posIncrAtt = teeStream.getAttribute(PositionIncrementAttribute.class); while (teeStream.incrementToken()) { diff --git a/src/test/org/apache/lucene/collation/CollationTestBase.java b/src/test/org/apache/lucene/collation/CollationTestBase.java index 285f9bfc973..69f9e3afd1f 100644 --- a/src/test/org/apache/lucene/collation/CollationTestBase.java +++ b/src/test/org/apache/lucene/collation/CollationTestBase.java @@ -18,7 +18,6 @@ package org.apache.lucene.collation; */ -import junit.framework.TestCase; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; import org.apache.lucene.analysis.WhitespaceAnalyzer; @@ -38,14 +37,14 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.document.Field; import org.apache.lucene.document.Document; import org.apache.lucene.util.IndexableBinaryStringTools; -import org.apache.lucene.util.Version; +import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; import java.nio.CharBuffer; import java.nio.ByteBuffer; -public class CollationTestBase extends TestCase { +public class CollationTestBase extends LuceneTestCase { protected String firstRangeBeginningOriginal = "\u062F"; protected String firstRangeEndOriginal = "\u0698"; @@ -179,7 +178,7 @@ public class CollationTestBase extends TestCase { String usResult) throws Exception { RAMDirectory 
indexStore = new RAMDirectory(); PerFieldAnalyzerWrapper analyzer - = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT)); + = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)); analyzer.addAnalyzer("US", usAnalyzer); analyzer.addAnalyzer("France", franceAnalyzer); analyzer.addAnalyzer("Sweden", swedenAnalyzer); diff --git a/src/test/org/apache/lucene/document/TestBinaryDocument.java b/src/test/org/apache/lucene/document/TestBinaryDocument.java index 25e1607ed8d..64f77b583a0 100644 --- a/src/test/org/apache/lucene/document/TestBinaryDocument.java +++ b/src/test/org/apache/lucene/document/TestBinaryDocument.java @@ -59,7 +59,7 @@ public class TestBinaryDocument extends LuceneTestCase /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); writer.close(); @@ -97,7 +97,7 @@ public class TestBinaryDocument extends LuceneTestCase /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); writer.close(); diff --git a/src/test/org/apache/lucene/document/TestDocument.java b/src/test/org/apache/lucene/document/TestDocument.java index 079ab6e77e6..558a27f7b48 100644 --- a/src/test/org/apache/lucene/document/TestDocument.java +++ b/src/test/org/apache/lucene/document/TestDocument.java @@ -154,7 +154,7 @@ public class TestDocument extends LuceneTestCase public void testGetValuesForIndexedDocument() throws Exception { RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(makeDocumentWithFields()); writer.close(); @@ -225,7 +225,7 @@ public class TestDocument extends LuceneTestCase doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED)); RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); field.setValue("id2"); writer.addDocument(doc); diff --git a/src/test/org/apache/lucene/index/DocHelper.java b/src/test/org/apache/lucene/index/DocHelper.java index 150f36fe61a..a21cbc9b840 100644 --- a/src/test/org/apache/lucene/index/DocHelper.java +++ b/src/test/org/apache/lucene/index/DocHelper.java @@ -29,7 +29,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; import org.apache.lucene.search.Similarity; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Version; +import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT; class 
DocHelper { public static final String FIELD_1_TEXT = "field one text"; @@ -219,7 +219,7 @@ class DocHelper { */ public static SegmentInfo writeDoc(Directory dir, Document doc) throws IOException { - return writeDoc(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), Similarity.getDefault(), doc); + return writeDoc(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), Similarity.getDefault(), doc); } /** diff --git a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java index 0cacd763143..9a387c86934 100755 --- a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java +++ b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java @@ -20,8 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; - import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -429,7 +427,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { private IndexWriter newWriter(Directory dir, boolean create) throws IOException { - final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED); + final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMergePolicy(new LogDocMergePolicy(writer)); return writer; } @@ -503,7 +501,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { public void testHangOnClose() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMergePolicy(new LogByteSizeMergePolicy(writer)); writer.setMaxBufferedDocs(5); writer.setUseCompoundFile(false); @@ -529,7 +527,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.close(); Directory dir2 = new MockRAMDirectory(); - writer = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer); lmp.setMinMergeMB(0.0001); writer.setMergePolicy(lmp); diff --git a/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/src/test/org/apache/lucene/index/TestAtomicUpdate.java index e3beb6dbbff..7e35462653e 100644 --- a/src/test/org/apache/lucene/index/TestAtomicUpdate.java +++ b/src/test/org/apache/lucene/index/TestAtomicUpdate.java @@ -26,7 +26,7 @@ import java.io.File; import java.io.IOException; public class TestAtomicUpdate extends LuceneTestCase { - private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT); + private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT); private Random RANDOM; public class MockIndexWriter extends IndexWriter { diff --git a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java index e69f4116bb1..72258d2425a 100644 --- a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ 
b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java @@ -45,7 +45,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.ReaderUtil; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; /* @@ -218,7 +217,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase hasTested29++; } - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); w.optimize(); w.close(); @@ -273,7 +272,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase } public void searchIndex(String dirName, String oldName) throws IOException { - //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)); + //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)); //Query query = parser.parse("handle:1"); dirName = fullDir(dirName); @@ -358,7 +357,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase Directory dir = FSDirectory.open(new File(dirName)); // open writer - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); // add 10 docs for(int i=0;i<10;i++) { @@ -402,7 +401,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase searcher.close(); // optimize - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); writer.optimize(); writer.close(); @@ -452,7 +451,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase searcher.close(); // optimize - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); writer.optimize(); writer.close(); @@ -474,7 +473,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase dirName = fullDir(dirName); Directory dir = FSDirectory.open(new File(dirName)); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setUseCompoundFile(doCFS); writer.setMaxBufferedDocs(10); @@ -485,7 +484,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase writer.close(); // open fresh writer so we get no prx file in the added segment - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setUseCompoundFile(doCFS); writer.setMaxBufferedDocs(10); addNoProxDoc(writer); @@ -512,7 +511,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase try { Directory dir = FSDirectory.open(new 
File(fullDir(outputDir))); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setRAMBufferSizeMB(16.0); for(int i=0;i<35;i++) { addDoc(writer, i); diff --git a/src/test/org/apache/lucene/index/TestCheckIndex.java b/src/test/org/apache/lucene/index/TestCheckIndex.java index 02861501e77..ce629019285 100644 --- a/src/test/org/apache/lucene/index/TestCheckIndex.java +++ b/src/test/org/apache/lucene/index/TestCheckIndex.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.ArrayList; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; @@ -35,7 +34,7 @@ public class TestCheckIndex extends LuceneTestCase { public void testDeletedDocs() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); Document doc = new Document(); diff --git a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java index 8138c41e483..fb71e424c3e 100644 --- a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java +++ b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java @@ -25,13 +25,11 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; - import java.io.IOException; public class TestConcurrentMergeScheduler extends LuceneTestCase { - private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT); + private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT); private static class FailOnlyOnFlush extends MockRAMDirectory.Failure { boolean doFail; diff --git a/src/test/org/apache/lucene/index/TestCrash.java b/src/test/org/apache/lucene/index/TestCrash.java index d3c2967885c..e5b873628a7 100644 --- a/src/test/org/apache/lucene/index/TestCrash.java +++ b/src/test/org/apache/lucene/index/TestCrash.java @@ -20,7 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.NoLockFactory; @@ -36,7 +35,7 @@ public class TestCrash extends LuceneTestCase { private IndexWriter initIndex(MockRAMDirectory dir) throws IOException { dir.setLockFactory(NoLockFactory.getNoLockFactory()); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); //writer.setMaxBufferedDocs(2); writer.setMaxBufferedDocs(10); ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions(); diff --git a/src/test/org/apache/lucene/index/TestDeletionPolicy.java 
b/src/test/org/apache/lucene/index/TestDeletionPolicy.java index 9f25b051986..af47daab120 100644 --- a/src/test/org/apache/lucene/index/TestDeletionPolicy.java +++ b/src/test/org/apache/lucene/index/TestDeletionPolicy.java @@ -34,7 +34,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; /* Verify we can read the pre-2.1 file format, do searches @@ -202,7 +201,7 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new RAMDirectory(); ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); writer.close(); @@ -211,7 +210,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Record last time when writer performed deletes of // past commits lastDeleteTime = System.currentTimeMillis(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); for(int j=0;j<17;j++) { addDoc(writer); @@ -272,7 +271,7 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new RAMDirectory(); policy.dir = dir; - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(10); writer.setUseCompoundFile(useCompoundFile); writer.setMergeScheduler(new SerialMergeScheduler()); @@ -281,7 +280,7 @@ public class TestDeletionPolicy extends LuceneTestCase } writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); writer.optimize(); writer.close(); @@ -319,7 +318,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Open & close a writer and assert that it // actually removed something: int preCount = dir.listAll().length; - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED); writer.close(); int postCount = dir.listAll().length; assertTrue(postCount < preCount); @@ -341,7 +340,7 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new MockRAMDirectory(); policy.dir = dir; - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); 
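[Editor's note] Every change in this file is the same mechanical substitution: the analyzer handed to the IndexWriter constructor now takes the shared test constant instead of a hard-wired Version. Helper classes that do not extend the test base class pick up the same constant through a static import, as DocHelper does above. A compile-checkable sketch of the before/after shape — sketch only; the stand-in value below is an assumption, since the patch shows only the constant's name and that it lives on LuceneTestCaseJ4:

// Sketch, not part of the patch: the substitution applied across these tests.
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class WriterVersionSketch {
  // Hypothetical stand-in: the real constant is defined once on the shared
  // test base class (LuceneTestCaseJ4), not redeclared per test like this.
  static final Version TEST_VERSION_CURRENT = Version.LUCENE_CURRENT;

  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    // Before: new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, ...)
    // After: one global knob, so bumping the version under test is a one-line change.
    IndexWriter writer = new IndexWriter(dir,
        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
        IndexWriter.MaxFieldLength.UNLIMITED);
    writer.close();
  }
}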
writer.setMaxBufferedDocs(2); for(int i=0;i<10;i++) { addDoc(writer); @@ -360,7 +359,7 @@ public class TestDeletionPolicy extends LuceneTestCase assertTrue(lastCommit != null); // Now add 1 doc and optimize - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); addDoc(writer); assertEquals(11, writer.numDocs()); writer.optimize(); @@ -369,7 +368,7 @@ public class TestDeletionPolicy extends LuceneTestCase assertEquals(7, IndexReader.listCommits(dir).size()); // Now open writer on the commit just before optimize: - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); assertEquals(10, writer.numDocs()); // Should undo our rollback: @@ -381,7 +380,7 @@ public class TestDeletionPolicy extends LuceneTestCase assertEquals(11, r.numDocs()); r.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); assertEquals(10, writer.numDocs()); // Commits the rollback: writer.close(); @@ -397,7 +396,7 @@ public class TestDeletionPolicy extends LuceneTestCase r.close(); // Reoptimize - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); writer.close(); @@ -408,7 +407,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Now open writer on the commit just before optimize, // but this time keeping only the last commit: - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit); assertEquals(10, writer.numDocs()); // Reader still sees optimized index, because writer @@ -444,7 +443,7 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(10); writer.setUseCompoundFile(useCompoundFile); for(int i=0;i<107;i++) { @@ -452,7 +451,7 @@ public class TestDeletionPolicy extends LuceneTestCase } writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); writer.optimize(); writer.close(); @@ -487,7 +486,7 @@ public class TestDeletionPolicy extends LuceneTestCase KeepLastNDeletionPolicy policy = new 
KeepLastNDeletionPolicy(N); for(int j=0;j thrown = new ArrayList(); - final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) { + final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) { @Override public void message(final String message) { if (message.startsWith("now flush at close") && 0 == thrown.size()) { @@ -4073,7 +4072,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1442 public void testDoubleOffsetCounting() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4108,7 +4107,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1442 public void testDoubleOffsetCounting2() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4130,7 +4129,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionCharAnalyzer() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); Field f = new Field("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4152,7 +4151,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionWithCachingTokenFilter() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); + Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd "))); @@ -4176,7 +4175,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); + Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); TeeSinkTokenFilter tee = new TeeSinkTokenFilter(analyzer.tokenStream("field", new StringReader("abcd "))); @@ -4202,7 +4201,7 @@ public class TestIndexWriter extends 
LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStopFilter() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StopAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new StopAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4224,7 +4223,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStandard() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); Field f = new Field("field", "abcd the ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); @@ -4254,7 +4253,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStandardEmptyField() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); Field f = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); @@ -4281,7 +4280,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStandardEmptyField2() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); Field f = new Field("field", "abcd", Field.Store.NO, @@ -4323,7 +4322,7 @@ public class TestIndexWriter extends LuceneTestCase { out.writeByte((byte) 42); out.close(); - new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED).close(); + new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED).close(); assertTrue(dir.fileExists("myrandomfile")); @@ -4339,7 +4338,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testDeadlock() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(2); Document doc = new Document(); doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, @@ -4351,7 +4350,7 @@ public class TestIndexWriter extends LuceneTestCase { // index has 2 segments MockRAMDirectory dir2 = new MockRAMDirectory(); - IndexWriter writer2 = new IndexWriter(dir2, new 
WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer2.addDocument(doc); writer2.close(); @@ -4389,7 +4388,7 @@ public class TestIndexWriter extends LuceneTestCase { if (w != null) { w.close(); } - w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); //((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions(); if (!first && !allowInterrupt) { @@ -4498,30 +4497,30 @@ public class TestIndexWriter extends LuceneTestCase { public void testIndexStoreCombos() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); byte[] b = new byte[50]; for(int i=0;i<50;i++) b[i] = (byte) (i+77); Document doc = new Document(); Field f = new Field("binary", b, 10, 17, Field.Store.YES); - f.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc1field1"))); + f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field1"))); Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED); - f2.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc1field2"))); + f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field2"))); doc.add(f); doc.add(f2); w.addDocument(doc); // add 2 docs to test in-memory merging - f.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc2field1"))); - f2.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc2field2"))); + f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field1"))); + f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field2"))); w.addDocument(doc); // force segment flush so we can force a segment merge with doc3 later. 
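[Editor's note] The testIndexStoreCombos hunks above move the constant inside Field.setTokenStream calls: there the field's stored value and its indexed tokens are decoupled, with the tokens coming from an explicitly supplied tokenizer rather than from the writer's analyzer. A minimal sketch of that pattern — sketch only; the field name and token text are borrowed from the test:

// Sketch, not part of the patch: a stored field whose indexed tokens are
// supplied directly via Field.setTokenStream, as in testIndexStoreCombos.
import java.io.StringReader;

import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.Version;

public class PreTokenizedFieldSketch {
  public static void main(String[] args) {
    Document doc = new Document();
    Field f = new Field("string", "value", Field.Store.YES, Field.Index.ANALYZED);
    // The stored value stays "value"; searches match the tokens of this
    // explicit stream ("doc1field2") instead of the analyzed stored value.
    f.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT,
        new StringReader("doc1field2")));
    doc.add(f);
  }
}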
w.commit(); - f.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc3field1"))); - f2.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc3field2"))); + f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field1"))); + f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field2"))); w.addDocument(doc); w.commit(); @@ -4560,7 +4559,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1727: make sure doc fields are stored in order public void testStoredFieldsOrder() throws Throwable { Directory d = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO)); doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO)); @@ -4592,7 +4591,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testEmbeddedFFFF() throws Throwable { Directory d = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED)); w.addDocument(doc); @@ -4607,7 +4606,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testNoDocsIndex() throws Throwable { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(false); ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); writer.setInfoStream(new PrintStream(bos)); @@ -4625,7 +4624,7 @@ public class TestIndexWriter extends LuceneTestCase { final int NUM_THREADS = 5; final double RUN_SEC = 0.5; final Directory dir = new MockRAMDirectory(); - final IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + final IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); w.commit(); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[NUM_THREADS]; diff --git a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 99561ba48a1..122fe7a9d2c 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -29,7 +29,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; public class TestIndexWriterDelete extends LuceneTestCase { @@ -43,7 +42,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, 
IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setUseCompoundFile(true); modifier.setMaxBufferedDeleteTerms(1); @@ -80,7 +79,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -115,7 +114,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testMaxBufferedDeletes() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDeleteTerms(1); writer.deleteDocuments(new Term("foobar", "1")); writer.deleteDocuments(new Term("foobar", "1")); @@ -130,7 +129,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { for(int t=0;t<2;t++) { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(4); modifier.setMaxBufferedDeleteTerms(4); @@ -172,7 +171,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testBothDeletes() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(100); modifier.setMaxBufferedDeleteTerms(100); @@ -205,7 +204,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testBatchDeletes() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -249,7 +248,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAll() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -296,7 +295,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllRollback() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -334,7 +333,7 @@ public class 
TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllNRT() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -426,7 +425,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { // First build up a starting index: MockRAMDirectory startDir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(startDir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 157; i++) { Document d = new Document(); d.add(new Field("id", Integer.toString(i), Field.Store.YES, @@ -449,7 +448,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockRAMDirectory dir = new MockRAMDirectory(startDir); dir.setPreventDoubleWrite(false); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(1000); // use flush or close modifier.setMaxBufferedDeleteTerms(1000); // use flush or close @@ -655,7 +654,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockRAMDirectory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setUseCompoundFile(true); modifier.setMaxBufferedDeleteTerms(2); @@ -764,7 +763,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockRAMDirectory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); dir.failOn(failure.reset()); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java index 3dadc93af2e..5f9e874d7bb 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java @@ -21,7 +21,6 @@ import java.util.Random; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.Directory; @@ -135,7 +134,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testRandomExceptions() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions(); //writer.setMaxBufferedDocs(10); writer.setRAMBufferSizeMB(0.1); @@ -173,7 +172,7 @@ public class 
TestIndexWriterExceptions extends LuceneTestCase { public void testRandomExceptionsThreads() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions(); //writer.setMaxBufferedDocs(10); writer.setRAMBufferSizeMB(0.2); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java b/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java index 75aca4e6ce1..86aac34b8a7 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java @@ -75,10 +75,10 @@ public class TestIndexWriterLockRelease extends LuceneTestCase { IndexWriter im; FSDirectory dir = FSDirectory.open(this.__test_dir); try { - im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); + im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); } catch (FileNotFoundException e) { try { - im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); + im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); } catch (FileNotFoundException e1) { } } finally { diff --git a/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java index 71c06deda2f..098b3618d8d 100755 --- a/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java @@ -24,7 +24,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.LuceneTestCase; @@ -35,7 +34,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testNormalCase() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); writer.setMergeFactor(10); writer.setMergePolicy(new LogDocMergePolicy(writer)); @@ -52,7 +51,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testNoOverMerge() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); writer.setMergeFactor(10); writer.setMergePolicy(new 
LogDocMergePolicy(writer)); @@ -74,7 +73,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testForceFlush() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); writer.setMergeFactor(10); LogDocMergePolicy mp = new LogDocMergePolicy(writer); @@ -85,7 +84,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { addDoc(writer); writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); writer.setMergePolicy(mp); mp.setMinMergeDocs(100); @@ -100,7 +99,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testMergeFactorChange() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); writer.setMergeFactor(100); writer.setMergePolicy(new LogDocMergePolicy(writer)); @@ -126,7 +125,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testMaxBufferedDocsChange() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(101); writer.setMergeFactor(101); writer.setMergePolicy(new LogDocMergePolicy(writer)); @@ -140,7 +139,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { } writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(101); writer.setMergeFactor(101); writer.setMergePolicy(new LogDocMergePolicy(writer)); @@ -171,7 +170,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testMergeDocCount0() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMergePolicy(new LogDocMergePolicy(writer)); writer.setMaxBufferedDocs(10); writer.setMergeFactor(100); @@ -186,7 +185,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { reader.deleteDocuments(new Term("content", "aaa")); reader.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, 
IndexWriter.MaxFieldLength.UNLIMITED); writer.setMergePolicy(new LogDocMergePolicy(writer)); writer.setMaxBufferedDocs(10); writer.setMergeFactor(5); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/src/test/org/apache/lucene/index/TestIndexWriterMerging.java index 0e3588b03ca..e6e30b5ba01 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterMerging.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterMerging.java @@ -56,7 +56,7 @@ public class TestIndexWriterMerging extends LuceneTestCase Directory merged = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMergeFactor(2); writer.addIndexesNoOptimize(new Directory[]{indexA, indexB}); @@ -93,7 +93,7 @@ public class TestIndexWriterMerging extends LuceneTestCase private void fillIndex(Directory dir, int start, int numDocs) throws IOException { - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMergeFactor(2); writer.setMaxBufferedDocs(2); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 88a09cae934..4ee1b93ec95 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -37,7 +37,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.ThreadInterruptedException; @@ -77,7 +76,7 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = true; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); // create the index @@ -112,7 +111,7 @@ public class TestIndexWriterReader extends LuceneTestCase { assertEquals(0, count(new Term("id", id10), r3)); assertEquals(1, count(new Term("id", Integer.toString(8000)), r3)); - writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED)); @@ -140,7 +139,7 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = false; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); // create the index @@ -149,7 +148,7 @@ public class TestIndexWriterReader extends LuceneTestCase { // create a 2nd index Directory dir2 = new 
MockRAMDirectory(); - IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer2.setInfoStream(infoStream); createIndexNoClose(!optimize, "index2", writer2); @@ -187,13 +186,13 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = false; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); // create a 2nd index Directory dir2 = new MockRAMDirectory(); - IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer2.setInfoStream(infoStream); createIndexNoClose(!optimize, "index2", writer2); @@ -222,7 +221,7 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = true; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); // create the index @@ -261,7 +260,7 @@ public class TestIndexWriterReader extends LuceneTestCase { writer.close(); // reopen the writer to verify the delete made it to the directory - writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); IndexReader w2r1 = writer.getReader(); @@ -276,7 +275,7 @@ public class TestIndexWriterReader extends LuceneTestCase { int numDirs = 3; Directory mainDir = new MockRAMDirectory(); - IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); mainWriter.setInfoStream(infoStream); AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter); @@ -384,7 +383,7 @@ public class TestIndexWriterReader extends LuceneTestCase { this.numDirs = numDirs; this.mainWriter = mainWriter; addDir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); for (int i = 0; i < NUM_INIT_DOCS; i++) { @@ -492,7 +491,7 @@ public class TestIndexWriterReader extends LuceneTestCase { */ public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); IndexReader r1 = writer.getReader(); @@ -530,7 +529,7 @@ public class TestIndexWriterReader extends LuceneTestCase { writer.close(); // test whether the changes made it to 
the directory - writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); IndexReader w2r1 = writer.getReader(); // insure the deletes were actually flushed to the directory @@ -571,7 +570,7 @@ public class TestIndexWriterReader extends LuceneTestCase { */ public static void createIndex(Directory dir1, String indexName, boolean multiSegment) throws IOException { - IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); w.setMergePolicy(new LogDocMergePolicy(w)); for (int i = 0; i < 100; i++) { @@ -606,7 +605,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testMergeWarmer() throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); @@ -641,7 +640,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testAfterCommit() throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); @@ -674,7 +673,7 @@ public class TestIndexWriterReader extends LuceneTestCase { // Make sure reader remains usable even if IndexWriter closes public void testAfterClose() throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); @@ -704,7 +703,7 @@ public class TestIndexWriterReader extends LuceneTestCase { // Stress test reopen during addIndexes public void testDuringAddIndexes() throws Exception { Directory dir1 = new MockRAMDirectory(); - final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); writer.setMergeFactor(2); @@ -782,7 +781,7 @@ public class TestIndexWriterReader extends LuceneTestCase { // Stress test reopen during add/delete public void testDuringAddDelete() throws Exception { Directory dir1 = new MockRAMDirectory(); - final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setInfoStream(infoStream); writer.setMergeFactor(2); @@ -863,7 +862,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testExpungeDeletes() throws Throwable { Directory dir = new MockRAMDirectory(); - final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new 
Document(); doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED)); @@ -888,7 +887,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testDeletesNumDocs() throws Throwable { Directory dir = new MockRAMDirectory(); - final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED)); diff --git a/src/test/org/apache/lucene/index/TestLazyBug.java b/src/test/org/apache/lucene/index/TestLazyBug.java index 655b133fa36..c3fae95685b 100755 --- a/src/test/org/apache/lucene/index/TestLazyBug.java +++ b/src/test/org/apache/lucene/index/TestLazyBug.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.*; @@ -64,7 +63,7 @@ public class TestLazyBug extends LuceneTestCase { Directory dir = new RAMDirectory(); try { Random r = newRandom(); - Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT); + Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT); IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); writer.setUseCompoundFile(false); diff --git a/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/src/test/org/apache/lucene/index/TestLazyProxSkipping.java index b2c84328b2a..c48b38140e4 100755 --- a/src/test/org/apache/lucene/index/TestLazyProxSkipping.java +++ b/src/test/org/apache/lucene/index/TestLazyProxSkipping.java @@ -30,7 +30,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; /** * Tests lazy skipping on the proximity file. 
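
Every hunk in this patch makes the same substitution: a hard-coded org.apache.lucene.util.Version.LUCENE_CURRENT at each call site becomes the shared TEST_VERSION_CURRENT constant exposed by the test base classes, so the compatibility version the whole suite exercises can be bumped in one place. Below is a minimal sketch of what such a base-class constant looks like; the real declaration lives in LuceneTestCaseJ4 (mirrored by LuceneTestCase) and is not part of this diff, and the concrete Version value is an assumption about what trunk currently targets.

    import org.apache.lucene.util.Version;

    // Sketch only -- the actual constant is declared once in Lucene's test
    // base classes; the concrete value below is an assumption, not from
    // this patch.
    public abstract class ExampleTestBase {
      /** Single knob: the matchVersion every core test hands to analyzers,
       *  tokenizers and query parsers. */
      public static final Version TEST_VERSION_CURRENT = Version.LUCENE_31;
    }

Subclasses then write new WhitespaceAnalyzer(TEST_VERSION_CURRENT) instead of naming Version.LUCENE_CURRENT in every file, which is exactly the rewrite the hunks above and below perform.
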
@@ -61,7 +60,7 @@ public class TestLazyProxSkipping extends LuceneTestCase { int numDocs = 500; Directory directory = new SeekCountingDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setUseCompoundFile(false); writer.setMaxBufferedDocs(10); for (int i = 0; i < numDocs; i++) { @@ -119,7 +118,7 @@ public class TestLazyProxSkipping extends LuceneTestCase { public void testSeek() throws IOException { Directory directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); for (int i = 0; i < 10; i++) { Document doc = new Document(); doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED)); diff --git a/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 3c2f18e34b2..8acf219b52a 100644 --- a/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ b/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -32,7 +32,6 @@ import org.apache.lucene.document.Field.Store; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; /** * This testcase tests whether multi-level skipping is being used @@ -92,7 +91,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { private static class PayloadAnalyzer extends Analyzer { @Override public TokenStream tokenStream(String fieldName, Reader reader) { - return new PayloadFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader)); + return new PayloadFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader)); } } diff --git a/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java b/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java index ac5ab1252ad..834037211ad 100644 --- a/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java +++ b/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java @@ -25,7 +25,6 @@ import org.apache.lucene.index.TestIndexWriterReader.HeavyAtomicInt; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; public class TestNRTReaderWithThreads extends LuceneTestCase { Random random = new Random(); @@ -33,7 +32,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase { public void testIndexing() throws Exception { Directory mainDir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer.setUseCompoundFile(false); IndexReader reader = writer.getReader(); // start pooling readers diff --git a/src/test/org/apache/lucene/index/TestNorms.java b/src/test/org/apache/lucene/index/TestNorms.java index 9d8b2cca645..1a61f012e60 100755 --- a/src/test/org/apache/lucene/index/TestNorms.java +++ b/src/test/org/apache/lucene/index/TestNorms.java @@ -65,7 +65,7 
@@ public class TestNorms extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); similarityOne = new SimilarityOne(); - anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + anlzr = new StandardAnalyzer(TEST_VERSION_CURRENT); } /** diff --git a/src/test/org/apache/lucene/index/TestOmitTf.java b/src/test/org/apache/lucene/index/TestOmitTf.java index 77caea03b7f..6d029b6be9f 100644 --- a/src/test/org/apache/lucene/index/TestOmitTf.java +++ b/src/test/org/apache/lucene/index/TestOmitTf.java @@ -66,7 +66,7 @@ public class TestOmitTf extends LuceneTestCase { // omitTermFreqAndPositions bit in the FieldInfo public void testOmitTermFreqAndPositions() throws Exception { Directory ram = new MockRAMDirectory(); - Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); Document d = new Document(); @@ -112,7 +112,7 @@ public class TestOmitTf extends LuceneTestCase { // omitTermFreqAndPositions for the same field works public void testMixedMerge() throws Exception { Directory ram = new MockRAMDirectory(); - Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(3); writer.setMergeFactor(2); @@ -165,7 +165,7 @@ public class TestOmitTf extends LuceneTestCase { // field, public void testMixedRAM() throws Exception { Directory ram = new MockRAMDirectory(); - Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); writer.setMergeFactor(2); @@ -213,7 +213,7 @@ public class TestOmitTf extends LuceneTestCase { // Verifies no *.prx exists when all fields omit term freq: public void testNoPrxFile() throws Throwable { Directory ram = new MockRAMDirectory(); - Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(3); writer.setMergeFactor(2); @@ -244,7 +244,7 @@ public class TestOmitTf extends LuceneTestCase { // Test scores with one field with Term Freqs and one without, otherwise with equal content public void testBasic() throws Exception { Directory dir = new MockRAMDirectory(); - Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); writer.setMergeFactor(2); writer.setMaxBufferedDocs(2); diff --git a/src/test/org/apache/lucene/index/TestParallelReader.java b/src/test/org/apache/lucene/index/TestParallelReader.java index 27ada396792..62973174d2f 100644 --- a/src/test/org/apache/lucene/index/TestParallelReader.java +++ b/src/test/org/apache/lucene/index/TestParallelReader.java @@ -106,7 +106,7 @@ public class TestParallelReader extends LuceneTestCase { // one document only: Directory dir2 = new MockRAMDirectory(); - 
IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); Document d3 = new Document(); d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED)); w2.addDocument(d3); @@ -151,13 +151,13 @@ public class TestParallelReader extends LuceneTestCase { Directory dir2 = getDir2(); // add another document to ensure that the indexes are not optimized - IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); Document d = new Document(); d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED)); modifier.addDocument(d); modifier.close(); - modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); d = new Document(); d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED)); modifier.addDocument(d); @@ -170,7 +170,7 @@ public class TestParallelReader extends LuceneTestCase { assertFalse(pr.isOptimized()); pr.close(); - modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + modifier = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); modifier.optimize(); modifier.close(); @@ -182,7 +182,7 @@ public class TestParallelReader extends LuceneTestCase { pr.close(); - modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); modifier.optimize(); modifier.close(); @@ -233,7 +233,7 @@ public class TestParallelReader extends LuceneTestCase { // Fields 1-4 indexed together: private Searcher single() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); Document d1 = new Document(); d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED)); d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED)); @@ -263,7 +263,7 @@ public class TestParallelReader extends LuceneTestCase { private Directory getDir1() throws IOException { Directory dir1 = new MockRAMDirectory(); - IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); Document d1 = new Document(); d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED)); d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED)); @@ -278,7 +278,7 @@ public class TestParallelReader extends LuceneTestCase { private Directory getDir2() throws IOException 
{ Directory dir2 = new RAMDirectory(); - IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); Document d3 = new Document(); d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED)); d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED)); diff --git a/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java b/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java index 5f5a087097f..70d421ae535 100644 --- a/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java +++ b/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java @@ -20,7 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; import org.apache.lucene.analysis.SimpleAnalyzer; @@ -48,7 +47,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { */ public void testEmptyIndex() throws IOException { RAMDirectory rd1 = new MockRAMDirectory(); - IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); iw.close(); @@ -56,7 +55,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { RAMDirectory rdOut = new MockRAMDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); ParallelReader pr = new ParallelReader(); pr.add(IndexReader.open(rd1,true)); @@ -81,7 +80,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { public void testEmptyIndexWithVectors() throws IOException { RAMDirectory rd1 = new MockRAMDirectory(); { - IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("test", "", Store.NO, Index.ANALYZED, @@ -96,7 +95,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { ir.deleteDocument(0); ir.close(); - iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), false, + iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), false, MaxFieldLength.UNLIMITED); iw.optimize(); iw.close(); @@ -104,7 +103,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { RAMDirectory rd2 = new MockRAMDirectory(); { - IndexWriter iw = new IndexWriter(rd2, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter iw = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); Document doc = new Document(); iw.addDocument(doc); @@ -113,7 +112,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { RAMDirectory rdOut = new MockRAMDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); ParallelReader pr = new ParallelReader(); pr.add(IndexReader.open(rd1,true)); diff --git 
a/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/src/test/org/apache/lucene/index/TestParallelTermEnum.java index a2bf173a6db..a793b065566 100755 --- a/src/test/org/apache/lucene/index/TestParallelTermEnum.java +++ b/src/test/org/apache/lucene/index/TestParallelTermEnum.java @@ -20,8 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; - import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -39,7 +37,7 @@ public class TestParallelTermEnum extends LuceneTestCase { Document doc; RAMDirectory rd1 = new RAMDirectory(); - IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); doc = new Document(); doc.add(new Field("field1", "the quick brown fox jumps", Store.YES, @@ -51,7 +49,7 @@ public class TestParallelTermEnum extends LuceneTestCase { iw1.close(); RAMDirectory rd2 = new RAMDirectory(); - IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); doc = new Document(); doc.add(new Field("field0", "", Store.NO, Index.ANALYZED)); diff --git a/src/test/org/apache/lucene/index/TestPayloads.java b/src/test/org/apache/lucene/index/TestPayloads.java index 0d5d7c14a61..25a04f20ea8 100644 --- a/src/test/org/apache/lucene/index/TestPayloads.java +++ b/src/test/org/apache/lucene/index/TestPayloads.java @@ -41,7 +41,6 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.UnicodeUtil; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; @@ -396,7 +395,7 @@ public class TestPayloads extends LuceneTestCase { @Override public TokenStream tokenStream(String fieldName, Reader reader) { PayloadData payload = fieldToData.get(fieldName); - TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader); + TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); if (payload != null) { if (payload.numFieldInstancesToSkip == 0) { ts = new PayloadFilter(ts, payload.data, payload.offset, payload.length); @@ -469,7 +468,7 @@ public class TestPayloads extends LuceneTestCase { final ByteArrayPool pool = new ByteArrayPool(numThreads, 5); Directory dir = new RAMDirectory(); - final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); final String field = "test"; Thread[] ingesters = new Thread[numThreads]; diff --git a/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/src/test/org/apache/lucene/index/TestSegmentTermDocs.java index 1aa7dfe8eea..90baf959e73 100644 --- a/src/test/org/apache/lucene/index/TestSegmentTermDocs.java +++ b/src/test/org/apache/lucene/index/TestSegmentTermDocs.java @@ -18,7 +18,6 @@ package org.apache.lucene.index; */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import 
org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.Directory; @@ -103,7 +102,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { public void testSkipTo(int indexDivisor) throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); Term ta = new Term("content","aaa"); diff --git a/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/src/test/org/apache/lucene/index/TestSegmentTermEnum.java index a43477fb278..6f657c57d6f 100644 --- a/src/test/org/apache/lucene/index/TestSegmentTermEnum.java +++ b/src/test/org/apache/lucene/index/TestSegmentTermEnum.java @@ -20,8 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; - import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -38,7 +36,7 @@ public class TestSegmentTermEnum extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); // ADD 100 documents with term : aaa // add 100 documents with terms: aaa bbb @@ -54,7 +52,7 @@ public class TestSegmentTermEnum extends LuceneTestCase verifyDocFreq(); // merge segments by optimizing the index - writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); writer.close(); @@ -65,7 +63,7 @@ public class TestSegmentTermEnum extends LuceneTestCase public void testPrevTermAtEnd() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); addDoc(writer, "aaa bbb"); writer.close(); SegmentReader reader = SegmentReader.getOnlySegmentReader(dir); diff --git a/src/test/org/apache/lucene/index/TestStressIndexing.java b/src/test/org/apache/lucene/index/TestStressIndexing.java index bde80c100dd..8d6083a5c56 100644 --- a/src/test/org/apache/lucene/index/TestStressIndexing.java +++ b/src/test/org/apache/lucene/index/TestStressIndexing.java @@ -26,7 +26,7 @@ import java.util.Random; import java.io.File; public class TestStressIndexing extends LuceneTestCase { - private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT); + private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT); private Random RANDOM; private static abstract class TimedThread extends Thread { diff --git a/src/test/org/apache/lucene/index/TestStressIndexing2.java b/src/test/org/apache/lucene/index/TestStressIndexing2.java index 14511d1c4e1..2d1209cfcd9 100644 --- a/src/test/org/apache/lucene/index/TestStressIndexing2.java +++ b/src/test/org/apache/lucene/index/TestStressIndexing2.java @@ -19,7 +19,6 @@ import 
org.apache.lucene.document.*; import org.apache.lucene.analysis.*; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.StringHelper; import org.apache.lucene.search.TermQuery; @@ -124,7 +123,7 @@ public class TestStressIndexing2 extends LuceneTestCase { public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map docs = new HashMap(); - IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); w.setUseCompoundFile(false); /*** @@ -176,7 +175,7 @@ public class TestStressIndexing2 extends LuceneTestCase { public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map docs = new HashMap(); for(int iter=0;iter<3;iter++) { - IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); w.setUseCompoundFile(false); // force many merges @@ -219,7 +218,7 @@ public class TestStressIndexing2 extends LuceneTestCase { public static void indexSerial(Map docs, Directory dir) throws IOException { - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); // index all docs in a single thread Iterator iter = docs.values().iterator(); diff --git a/src/test/org/apache/lucene/index/TestThreadedOptimize.java b/src/test/org/apache/lucene/index/TestThreadedOptimize.java index 09a68bd4baf..020e63f36b9 100644 --- a/src/test/org/apache/lucene/index/TestThreadedOptimize.java +++ b/src/test/org/apache/lucene/index/TestThreadedOptimize.java @@ -24,7 +24,6 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.util.Version; import org.apache.lucene.util._TestUtil; import org.apache.lucene.util.English; @@ -35,7 +34,7 @@ import java.io.File; public class TestThreadedOptimize extends LuceneTestCase { - private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT); + private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT); private final static int NUM_THREADS = 3; //private final static int NUM_THREADS = 5; diff --git a/src/test/org/apache/lucene/index/TestTransactionRollback.java b/src/test/org/apache/lucene/index/TestTransactionRollback.java index 1126ead0692..2086e5187d6 100644 --- a/src/test/org/apache/lucene/index/TestTransactionRollback.java +++ b/src/test/org/apache/lucene/index/TestTransactionRollback.java @@ -27,8 +27,6 @@ import java.util.Map; import java.util.HashMap; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; - import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -67,7 +65,7 @@ public class TestTransactionRollback extends LuceneTestCase { if 
(last==null) throw new RuntimeException("Couldn't find commit point "+id); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new RollbackDeletionPolicy(id), MaxFieldLength.UNLIMITED, last); Map data = new HashMap(); data.put("index", "Rolled back to 1-"+id); @@ -129,7 +127,7 @@ public class TestTransactionRollback extends LuceneTestCase { //Build index, of records 1 to 100, committing after each batch of 10 IndexDeletionPolicy sdp=new KeepAllDeletionPolicy(); - IndexWriter w=new IndexWriter(dir,new WhitespaceAnalyzer(Version.LUCENE_CURRENT),sdp,MaxFieldLength.UNLIMITED); + IndexWriter w=new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),sdp,MaxFieldLength.UNLIMITED); for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) { Document doc=new Document(); doc.add(new Field(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED)); @@ -197,7 +195,7 @@ public class TestTransactionRollback extends LuceneTestCase { for(int i=0;i<2;i++) { // Unless you specify a prior commit point, rollback // should not work: - new IndexWriter(dir,new WhitespaceAnalyzer(Version.LUCENE_CURRENT), + new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new DeleteLastCommitPolicy(), MaxFieldLength.UNLIMITED).close(); IndexReader r = IndexReader.open(dir, true); diff --git a/src/test/org/apache/lucene/index/TestTransactions.java b/src/test/org/apache/lucene/index/TestTransactions.java index 059b0d4dfbd..1e0aefb620d 100644 --- a/src/test/org/apache/lucene/index/TestTransactions.java +++ b/src/test/org/apache/lucene/index/TestTransactions.java @@ -88,12 +88,12 @@ public class TestTransactions extends LuceneTestCase @Override public void doWork() throws Throwable { - IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); writer1.setMaxBufferedDocs(3); writer1.setMergeFactor(2); ((ConcurrentMergeScheduler) writer1.getMergeScheduler()).setSuppressExceptions(); - IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); // Intentionally use different params so flush/merge // happen @ different times writer2.setMaxBufferedDocs(2); @@ -178,7 +178,7 @@ public class TestTransactions extends LuceneTestCase } public void initIndex(Directory dir) throws Throwable { - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); for(int j=0; j<7; j++) { Document d = new Document(); int n = RANDOM.nextInt(); diff --git a/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java b/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java index cfe45893a6d..be1af8c865a 100644 --- a/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java +++ b/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java @@ -44,7 +44,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { public void testMultiAnalyzer() throws ParseException { - QueryParser qp = new 
QueryParser(Version.LUCENE_CURRENT, "", new MultiAnalyzer()); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "", new MultiAnalyzer()); // trivial, no multiple tokens: assertEquals("foo", qp.parse("foo").toString()); @@ -135,9 +135,9 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { @Override public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader); + TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader); result = new TestFilter(result); - result = new LowerCaseFilter(Version.LUCENE_CURRENT, result); + result = new LowerCaseFilter(TEST_VERSION_CURRENT, result); return result; } } @@ -203,9 +203,9 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { @Override public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader); + TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader); result = new TestPosIncrementFilter(result); - result = new LowerCaseFilter(Version.LUCENE_CURRENT, result); + result = new LowerCaseFilter(TEST_VERSION_CURRENT, result); return result; } } @@ -242,7 +242,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase { private final static class DumbQueryParser extends QueryParser { public DumbQueryParser(String f, Analyzer a) { - super(Version.LUCENE_CURRENT, f, a); + super(TEST_VERSION_CURRENT, f, a); } /** expose super's version */ diff --git a/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java b/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java index adb5ea9df50..824af0e717d 100644 --- a/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java +++ b/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; /** * Tests QueryParser. 
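
The TestMultiFieldQueryParser hunks that follow show the clearest payoff: call sites that previously spelled the version twice, once for the parser and once more as a fully qualified org.apache.lucene.util.Version.LUCENE_CURRENT fed to StandardAnalyzer, now pass one constant to both. A hedged sketch of the resulting shape (the class and method names here are illustrative, not from the patch; the static import mirrors the one this patch adds to QueryUtils):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.MultiFieldQueryParser;
    import org.apache.lucene.queryParser.ParseException;
    import org.apache.lucene.search.Query;
    import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;

    final class VersionKnobExample {
      // Parser and analyzer now read the same knob, so the two can never
      // drift apart when the suite moves to a new compatibility version.
      static Query parse(String text) throws ParseException {
        MultiFieldQueryParser mfqp = new MultiFieldQueryParser(
            TEST_VERSION_CURRENT,
            new String[] {"b", "t"},
            new StandardAnalyzer(TEST_VERSION_CURRENT));
        return mfqp.parse(text);
      }
    }
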
@@ -60,18 +59,18 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] fields = {"b", "t"}; Occur occur[] = {Occur.SHOULD, Occur.SHOULD}; TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer(); - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, a); + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, a); Query q = mfqp.parse(qtxt); assertEquals(expectedRes, q.toString()); - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, qtxt, fields, occur, a); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, qtxt, fields, occur, a); assertEquals(expectedRes, q.toString()); } public void testSimple() throws Exception { String[] fields = {"b", "t"}; - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new StandardAnalyzer(TEST_VERSION_CURRENT)); Query q = mfqp.parse("one"); assertEquals("b:one t:one", q.toString()); @@ -134,7 +133,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { boosts.put("b", Float.valueOf(5)); boosts.put("t", Float.valueOf(10)); String[] fields = {"b", "t"}; - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts); + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new StandardAnalyzer(TEST_VERSION_CURRENT), boosts); //Check for simple @@ -160,24 +159,24 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStaticMethod1() throws ParseException { String[] fields = {"b", "t"}; String[] queries = {"one", "two"}; - Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("b:one t:two", q.toString()); String[] queries2 = {"+one", "+two"}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("(+b:one) (+t:two)", q.toString()); String[] queries3 = {"one", "+two"}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("b:one (+t:two)", q.toString()); String[] queries4 = {"one +more", "+two"}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("(b:one +b:more) (+t:two)", q.toString()); String[] queries5 = {"blah"}; try { - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new StandardAnalyzer(TEST_VERSION_CURRENT)); fail(); } 
catch(IllegalArgumentException e) { // expected exception, array length differs @@ -187,11 +186,11 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { TestQueryParser.QPTestAnalyzer stopA = new TestQueryParser.QPTestAnalyzer(); String[] queries6 = {"((+stop))", "+((stop))"}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries6, fields, stopA); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries6, fields, stopA); assertEquals("", q.toString()); String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries7, fields, stopA); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries7, fields, stopA); assertEquals("(b:one +b:more) (+t:two)", q.toString()); } @@ -199,15 +198,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStaticMethod2() throws ParseException { String[] fields = {"b", "t"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("+b:one -t:one", q.toString()); - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT)); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -219,15 +218,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { //int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer()); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));//, fields, flags, new StandardAnalyzer()); assertEquals("+b:one -t:one", q.toString()); - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT)); fail(); } catch(IllegalArgumentException e) 
{ // expected exception, array length differs @@ -239,12 +238,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] fields = {"f1", "f2", "f3"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD}; - Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("+f1:one -f2:two f3:three", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT)); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -255,12 +254,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] queries = {"one", "two"}; String[] fields = {"b", "t"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT)); assertEquals("+b:one -t:two", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT)); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -269,7 +268,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testAnalyzerReturningNull() throws ParseException { String[] fields = new String[] { "f1", "f2", "f3" }; - MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new AnalyzerReturningNull()); + MultiFieldQueryParser parser = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new AnalyzerReturningNull()); Query q = parser.parse("bla AND blo"); assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString()); // the following queries are not affected as their terms are not analyzed anyway: @@ -282,7 +281,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { } public void testStopWordSearching() throws Exception { - Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); Directory ramDir = new RAMDirectory(); IndexWriter iw = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); @@ -291,7 +290,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { iw.close(); MultiFieldQueryParser mfqp = - new MultiFieldQueryParser(Version.LUCENE_CURRENT, new String[] {"body"}, analyzer); + new MultiFieldQueryParser(TEST_VERSION_CURRENT, new String[] {"body"}, analyzer); mfqp.setDefaultOperator(QueryParser.Operator.AND); Query q = mfqp.parse("the footest"); 
IndexSearcher is = new IndexSearcher(ramDir, true); @@ -304,7 +303,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { * Return empty tokens for field "f1". */ private static class AnalyzerReturningNull extends Analyzer { - StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + StandardAnalyzer stdAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); public AnalyzerReturningNull() { } diff --git a/src/test/org/apache/lucene/queryParser/TestQueryParser.java b/src/test/org/apache/lucene/queryParser/TestQueryParser.java index 340d858ff2b..8dd6e94bfff 100644 --- a/src/test/org/apache/lucene/queryParser/TestQueryParser.java +++ b/src/test/org/apache/lucene/queryParser/TestQueryParser.java @@ -64,7 +64,6 @@ import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.util.LocalizedTestCase; -import org.apache.lucene.util.Version; /** * Tests QueryParser. @@ -128,13 +127,13 @@ public class TestQueryParser extends LocalizedTestCase { /** Filters LowerCaseTokenizer with StopFilter. */ @Override public final TokenStream tokenStream(String fieldName, Reader reader) { - return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader)); + return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader)); } } public static class QPTestParser extends QueryParser { public QPTestParser(String f, Analyzer a) { - super(Version.LUCENE_CURRENT, f, a); + super(TEST_VERSION_CURRENT, f, a); } @Override @@ -158,8 +157,8 @@ public class TestQueryParser extends LocalizedTestCase { public QueryParser getParser(Analyzer a) throws Exception { if (a == null) - a = new SimpleAnalyzer(Version.LUCENE_CURRENT); - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a); + a = new SimpleAnalyzer(TEST_VERSION_CURRENT); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); qp.setDefaultOperator(QueryParser.OR_OPERATOR); return qp; } @@ -228,8 +227,8 @@ public class TestQueryParser extends LocalizedTestCase { public Query getQueryDOA(String query, Analyzer a) throws Exception { if (a == null) - a = new SimpleAnalyzer(Version.LUCENE_CURRENT); - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a); + a = new SimpleAnalyzer(TEST_VERSION_CURRENT); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); qp.setDefaultOperator(QueryParser.AND_OPERATOR); return qp.parse(query); } @@ -253,8 +252,8 @@ public class TestQueryParser extends LocalizedTestCase { public void testSimple() throws Exception { assertQueryEquals("term term term", null, "term term term"); - assertQueryEquals("türm term term", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "türm term term"); - assertQueryEquals("ümlaut", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "ümlaut"); + assertQueryEquals("türm term term", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "türm term term"); + assertQueryEquals("ümlaut", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "ümlaut"); assertQueryEquals("\"\"", new KeywordAnalyzer(), ""); assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:"); @@ -301,7 +300,7 @@ public class TestQueryParser extends LocalizedTestCase { assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null, "+(title:dog title:cat) -author:\"bob dole\""); - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + QueryParser qp = 
new QueryParser(TEST_VERSION_CURRENT, "field", new StandardAnalyzer(TEST_VERSION_CURRENT)); // make sure OR is the default: assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator()); qp.setDefaultOperator(QueryParser.AND_OPERATOR); @@ -311,7 +310,7 @@ public class TestQueryParser extends LocalizedTestCase { } public void testPunct() throws Exception { - Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); + Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); assertQueryEquals("a&b", a, "a&b"); assertQueryEquals("a&&b", a, "a&&b"); assertQueryEquals(".NET", a, ".NET"); @@ -331,7 +330,7 @@ public class TestQueryParser extends LocalizedTestCase { assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); - Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); + Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT); assertQueryEquals("3", a, "3"); assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2"); assertQueryEquals("term term1 term2", a, "term term1 term2"); @@ -456,7 +455,7 @@ public class TestQueryParser extends LocalizedTestCase { assertQueryEquals("[ a TO z]", null, "[a TO z]"); assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod()); - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer(Version.LUCENE_CURRENT)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT)); qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE); assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod()); @@ -473,7 +472,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testFarsiRangeCollating() throws Exception { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); doc.add(new Field("content","\u0633\u0627\u0628", @@ -482,7 +481,7 @@ public class TestQueryParser extends LocalizedTestCase { iw.close(); IndexSearcher is = new IndexSearcher(ramDir, true); - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "content", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)); // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in // RuleBasedCollator. 
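
Worth noting how TEST_VERSION_CURRENT resolves in files that do not extend LuceneTestCase directly: TestQueryParser extends LocalizedTestCase and TestMultiAnalyzer extends BaseTokenStreamTestCase, which (assuming both ultimately derive from LuceneTestCase, as on trunk) simply inherit the constant, while standalone helper classes pull it in with a static import instead, as the QueryUtils hunk further down shows. A minimal sketch of that standalone route, under the same assumption; the helper class name is made up for illustration:

    import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.queryParser.ParseException;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.Query;

    // Helpers that extend nothing (QueryUtils is the in-patch example)
    // import the constant statically instead of inheriting it.
    final class StandaloneParserHelper {
      static Query parseDefaultField(String text) throws ParseException {
        QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field",
            new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
        return qp.parse(text);
      }
    }
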
However, the Arabic Locale seems to order the Farsi @@ -580,7 +579,7 @@ public class TestQueryParser extends LocalizedTestCase { final String defaultField = "default"; final String monthField = "month"; final String hourField = "hour"; - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer(Version.LUCENE_CURRENT)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT)); // Don't set any date resolution and verify if DateField is used assertDateRangeQueryEquals(qp, defaultField, startDate, endDate, @@ -621,7 +620,7 @@ public class TestQueryParser extends LocalizedTestCase { } public void testEscaped() throws Exception { - Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); + Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); /*assertQueryEquals("\\[brackets", a, "\\[brackets"); assertQueryEquals("\\[brackets", null, "brackets"); @@ -715,7 +714,7 @@ public class TestQueryParser extends LocalizedTestCase { } public void testQueryStringEscaping() throws Exception { - Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); + Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c"); assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c"); @@ -802,8 +801,8 @@ public class TestQueryParser extends LocalizedTestCase { throws Exception { Set stopWords = new HashSet(1); stopWords.add("on"); - StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, stopWords); - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", oneStopAnalyzer); + StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", oneStopAnalyzer); Query q = qp.parse("on^1.0"); assertNotNull(q); q = qp.parse("\"hello\"^2.0"); @@ -815,7 +814,7 @@ public class TestQueryParser extends LocalizedTestCase { q = qp.parse("\"on\"^1.0"); assertNotNull(q); - QueryParser qp2 = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)); + QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new StandardAnalyzer(TEST_VERSION_CURRENT)); q = qp2.parse("the^3"); // "the" is a stop word so the result is an empty query: assertNotNull(q); @@ -844,7 +843,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testCustomQueryParserWildcard() { try { - new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t"); + new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t"); fail("Wildcard queries should not be allowed"); } catch (ParseException expected) { // expected exception @@ -853,7 +852,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testCustomQueryParserFuzzy() throws Exception { try { - new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~"); + new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~"); fail("Fuzzy queries should not be allowed"); } catch (ParseException expected) { // expected exception @@ -863,7 +862,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testBooleanQuery() throws Exception { BooleanQuery.setMaxClauseCount(2); try { - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)); + QueryParser qp = new 
QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)); qp.parse("one two three"); fail("ParseException expected due to too many boolean clauses"); } catch (ParseException expected) { @@ -875,7 +874,7 @@ public class TestQueryParser extends LocalizedTestCase { * This test differs from TestPrecedenceQueryParser */ public void testPrecedence() throws Exception { - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)); Query query1 = qp.parse("A AND B OR C AND D"); Query query2 = qp.parse("+A +B +C +D"); assertEquals(query1, query2); @@ -883,7 +882,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testLocalDateFormat() throws IOException, ParseException { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); iw.close(); @@ -899,7 +898,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testStarParsing() throws Exception { final int[] type = new int[1]; - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)) { + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)) { @Override protected Query getWildcardQuery(String field, String termStr) throws ParseException { // override error checking of superclass @@ -958,7 +957,7 @@ public class TestQueryParser extends LocalizedTestCase { } public void testStopwords() throws Exception { - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "foo"))); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo"))); Query result = qp.parse("a:the OR a:foo"); assertNotNull("result is null and it shouldn't be", result); assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery); @@ -974,7 +973,7 @@ public class TestQueryParser extends LocalizedTestCase { } public void testPositionIncrement() throws Exception { - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "in", "are", "this"))); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this"))); qp.setEnablePositionIncrements(true); String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\""; // 0 2 5 7 8 @@ -991,7 +990,7 @@ public class TestQueryParser extends LocalizedTestCase { } public void testMatchAllDocs() throws Exception { - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)); assertEquals(new MatchAllDocsQuery(), qp.parse("*:*")); assertEquals(new MatchAllDocsQuery(), 
qp.parse("(*:*)")); BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*"); @@ -1000,7 +999,7 @@ public class TestQueryParser extends LocalizedTestCase { } private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException { - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "date", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)); qp.setLocale(Locale.ENGLISH); Query q = qp.parse(query); ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs; @@ -1028,7 +1027,7 @@ public class TestQueryParser extends LocalizedTestCase { // "match" public void testPositionIncrements() throws Exception { Directory dir = new MockRAMDirectory(); - Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT); + Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT); IndexWriter w = new IndexWriter(dir, a, IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED)); @@ -1036,7 +1035,7 @@ public class TestQueryParser extends LocalizedTestCase { IndexReader r = w.getReader(); w.close(); IndexSearcher s = new IndexSearcher(r); - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "f", a); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "f", a); Query q = qp.parse("\"wizard of ozzy\""); assertEquals(1, s.search(q, 1).totalHits); r.close(); diff --git a/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/src/test/org/apache/lucene/search/BaseTestRangeFilter.java index fd6e1a37c0b..29a09fb896b 100644 --- a/src/test/org/apache/lucene/search/BaseTestRangeFilter.java +++ b/src/test/org/apache/lucene/search/BaseTestRangeFilter.java @@ -20,8 +20,6 @@ package org.apache.lucene.search; import java.util.Random; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; - import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -98,7 +96,7 @@ public class BaseTestRangeFilter extends LuceneTestCase { try { /* build an index */ - IndexWriter writer = new IndexWriter(index.index, new SimpleAnalyzer(Version.LUCENE_CURRENT), T, + IndexWriter writer = new IndexWriter(index.index, new SimpleAnalyzer(TEST_VERSION_CURRENT), T, IndexWriter.MaxFieldLength.LIMITED); for (int d = minId; d <= maxId; d++) { diff --git a/src/test/org/apache/lucene/search/QueryUtils.java b/src/test/org/apache/lucene/search/QueryUtils.java index 7e771a5d42b..7116500e4dc 100644 --- a/src/test/org/apache/lucene/search/QueryUtils.java +++ b/src/test/org/apache/lucene/search/QueryUtils.java @@ -15,7 +15,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.Version; +import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT; /** * Copyright 2005 Apache Software Foundation @@ -200,7 +200,7 @@ public class QueryUtils { private static RAMDirectory makeEmptyIndex(final int numDeletedDocs) throws IOException { RAMDirectory d = new RAMDirectory(); - IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, + IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.LIMITED); for (int i = 0; i < numDeletedDocs; i++) { w.addDocument(new 
Document()); diff --git a/src/test/org/apache/lucene/search/TestBoolean2.java b/src/test/org/apache/lucene/search/TestBoolean2.java index afba9d85159..b131654b558 100644 --- a/src/test/org/apache/lucene/search/TestBoolean2.java +++ b/src/test/org/apache/lucene/search/TestBoolean2.java @@ -32,7 +32,6 @@ import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; /** Test BooleanQuery2 against BooleanQuery by overriding the standard query parser. * This also tests the scoring order of BooleanQuery. @@ -51,7 +50,7 @@ public class TestBoolean2 extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); RAMDirectory directory = new RAMDirectory(); - IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED)); @@ -68,14 +67,14 @@ public class TestBoolean2 extends LuceneTestCase { int docCount = 0; do { final Directory copy = new RAMDirectory(dir2); - IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); w.addIndexesNoOptimize(new Directory[] {copy}); docCount = w.maxDoc(); w.close(); mulFactor *= 2; } while(docCount < 3000); - IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED)); for(int i=0;i