From efb74380fda0da18650dbc66372a7bb1cd41dcf6 Mon Sep 17 00:00:00 2001
From: Uwe Schindler
Date: Sat, 27 Feb 2010 19:14:01 +0000
Subject: [PATCH] LUCENE-2285: Code cleanups to remove compiler warnings in
Eclipse.
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@917019 13f79535-47bb-0310-9956-ffa450edef68
---
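Most of the analyzer diffs below repeat a single cleanup: each analyzer
imported ReusableAnalyzerBase.TokenStreamComponents solely so that a short
{@link TokenStreamComponents} in its Javadoc would resolve, and Eclipse
flags such an import as unused. The fix drops the import and spells the
link target out in full. A minimal before/after sketch of the pattern
(condensed and illustrative, not copied from any one file):

    // Before: the import exists only for the Javadoc link, so Eclipse
    // reports "The import ... is never used".
    import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link

    /** Creates {@link TokenStreamComponents} used to tokenize the text. */

    // After: no import; the {@link} names the nested class by its
    // fully qualified name instead.

    /**
     * Creates
     * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
     * used to tokenize the text.
     */

The remaining hunks generify raw collection types, drop casts made
redundant by covariant array clone(), and add narrowly scoped
@SuppressWarnings where an unchecked operation is unavoidable.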
CHANGES.txt | 13 ++--
.../lucene/analysis/ar/ArabicAnalyzer.java | 17 ++---
.../lucene/analysis/bg/BulgarianAnalyzer.java | 18 ++---
.../lucene/analysis/br/BrazilianAnalyzer.java | 15 ++--
.../lucene/analysis/cjk/CJKAnalyzer.java | 1 -
.../lucene/analysis/cn/ChineseAnalyzer.java | 11 +--
.../HyphenationCompoundWordTokenFilter.java | 8 +--
.../compound/hyphenation/CharVector.java | 2 +-
.../compound/hyphenation/Hyphenation.java | 5 --
.../compound/hyphenation/HyphenationTree.java | 8 +--
.../compound/hyphenation/PatternConsumer.java | 2 +-
.../compound/hyphenation/PatternParser.java | 16 +++--
.../compound/hyphenation/TernaryTree.java | 8 +--
.../lucene/analysis/cz/CzechAnalyzer.java | 21 +++---
.../lucene/analysis/da/DanishAnalyzer.java | 16 +++--
.../lucene/analysis/de/GermanAnalyzer.java | 18 ++---
.../lucene/analysis/el/GreekAnalyzer.java | 21 +++---
.../lucene/analysis/en/EnglishAnalyzer.java | 16 +++--
.../lucene/analysis/es/SpanishAnalyzer.java | 16 +++--
.../lucene/analysis/fa/PersianAnalyzer.java | 12 ++--
.../lucene/analysis/fi/FinnishAnalyzer.java | 16 +++--
.../lucene/analysis/fr/FrenchAnalyzer.java | 17 ++---
.../lucene/analysis/hi/HindiAnalyzer.java | 18 ++---
.../lucene/analysis/hu/HungarianAnalyzer.java | 16 +++--
.../lucene/analysis/it/ItalianAnalyzer.java | 16 +++--
.../miscellaneous/PatternAnalyzer.java | 2 +-
.../analysis/ngram/EdgeNGramTokenFilter.java | 2 +-
.../analysis/ngram/NGramTokenFilter.java | 2 +-
.../lucene/analysis/no/NorwegianAnalyzer.java | 16 +++--
.../analysis/pt/PortugueseAnalyzer.java | 16 +++--
.../query/QueryAutoStopWordAnalyzer.java | 2 +-
.../lucene/analysis/ro/RomanianAnalyzer.java | 16 +++--
.../lucene/analysis/ru/RussianAnalyzer.java | 22 +++---
.../lucene/analysis/ru/RussianStemmer.java | 22 ++----
.../analysis/shingle/ShingleFilter.java | 2 +-
.../analysis/shingle/ShingleMatrixFilter.java | 1 -
.../sinks/DateRecognizerSinkFilter.java | 3 +-
.../lucene/analysis/sv/SwedishAnalyzer.java | 16 +++--
.../lucene/analysis/th/ThaiAnalyzer.java | 13 ++--
.../lucene/analysis/tr/TurkishAnalyzer.java | 12 ++--
.../analysis/ar/TestArabicAnalyzer.java | 15 ++--
.../ar/TestArabicNormalizationFilter.java | 4 +-
.../analysis/ar/TestArabicStemFilter.java | 7 +-
.../analysis/bg/TestBulgarianAnalyzer.java | 12 ++--
.../analysis/bg/TestBulgarianStemmer.java | 12 ++--
.../analysis/br/TestBrazilianStemmer.java | 23 +++---
.../lucene/analysis/cjk/TestCJKTokenizer.java | 9 ++-
.../compound/TestCompoundWordTokenFilter.java | 26 +++----
.../lucene/analysis/cz/TestCzechAnalyzer.java | 8 +--
.../lucene/analysis/cz/TestCzechStemmer.java | 19 +++--
.../analysis/da/TestDanishAnalyzer.java | 7 +-
.../analysis/de/TestGermanAnalyzer.java | 20 +++---
.../analysis/de/TestGermanStemFilter.java | 3 +-
.../lucene/analysis/el/GreekAnalyzerTest.java | 4 +-
.../analysis/en/TestEnglishAnalyzer.java | 7 +-
.../analysis/es/TestSpanishAnalyzer.java | 7 +-
.../analysis/fa/TestPersianAnalyzer.java | 15 ++--
.../fa/TestPersianNormalizationFilter.java | 3 +-
.../analysis/fi/TestFinnishAnalyzer.java | 7 +-
.../lucene/analysis/fr/TestElision.java | 13 ++--
.../analysis/fr/TestFrenchAnalyzer.java | 14 ++--
.../lucene/analysis/hi/TestHindiAnalyzer.java | 7 +-
.../analysis/hi/TestHindiNormalizer.java | 3 +-
.../lucene/analysis/hi/TestHindiStemmer.java | 3 +-
.../analysis/hu/TestHungarianAnalyzer.java | 7 +-
.../analysis/in/TestIndicNormalizer.java | 3 +-
.../analysis/in/TestIndicTokenizer.java | 5 +-
.../analysis/it/TestItalianAnalyzer.java | 7 +-
.../miscellaneous/PatternAnalyzerTest.java | 15 ++--
.../TestPrefixAndSuffixAwareTokenFilter.java | 3 +-
.../TestPrefixAwareTokenFilter.java | 3 +-
.../TestStemmerOverrideFilter.java | 3 +-
.../ngram/EdgeNGramTokenFilterTest.java | 9 ++-
.../ngram/EdgeNGramTokenizerTest.java | 2 +-
.../analysis/ngram/NGramTokenFilterTest.java | 9 ++-
.../analysis/ngram/NGramTokenizerTest.java | 2 +-
.../lucene/analysis/nl/TestDutchStemmer.java | 14 ++--
.../analysis/no/TestNorwegianAnalyzer.java | 7 +-
.../DelimitedPayloadTokenFilterTest.java | 14 ++--
.../NumericPayloadTokenFilterTest.java | 3 +-
.../TokenOffsetPayloadTokenFilterTest.java | 3 +-
.../TypeAsPayloadTokenFilterTest.java | 4 +-
.../analysis/pt/TestPortugueseAnalyzer.java | 7 +-
.../query/QueryAutoStopWordAnalyzerTest.java | 15 ++--
.../reverse/TestReverseStringFilter.java | 36 +++++-----
.../analysis/ro/TestRomanianAnalyzer.java | 7 +-
.../analysis/ru/TestRussianAnalyzer.java | 13 ++--
.../lucene/analysis/ru/TestRussianStem.java | 18 ++---
.../shingle/ShingleAnalyzerWrapperTest.java | 31 ++++----
.../analysis/shingle/ShingleFilterTest.java | 3 +-
.../shingle/TestShingleMatrixFilter.java | 45 ++++--------
.../DateRecognizerSinkTokenizerTest.java | 3 +-
.../sinks/TokenRangeSinkTokenizerTest.java | 3 +-
.../sinks/TokenTypeSinkTokenizerTest.java | 4 +-
.../analysis/snowball/TestSnowball.java | 10 +--
.../analysis/sv/TestSwedishAnalyzer.java | 7 +-
.../lucene/analysis/th/TestThaiAnalyzer.java | 11 ++-
.../analysis/tr/TestTurkishAnalyzer.java | 7 +-
.../tr/TestTurkishLowerCaseFilter.java | 7 +-
.../org/apache/lucene/ant/IndexTaskTest.java | 16 ++---
.../feeds/LongToEnglishContentSource.java | 2 +
.../benchmark/byTask/feeds/QueryMaker.java | 2 +-
.../lucene/benchmark/byTask/stats/Points.java | 3 -
.../benchmark/byTask/stats/TaskStats.java | 2 +-
.../byTask/tasks/NearRealtimeReaderTask.java | 2 +-
.../byTask/tasks/NewShingleAnalyzerTask.java | 4 --
.../byTask/tasks/ReadTokensTask.java | 2 +-
.../lucene/benchmark/byTask/utils/Config.java | 2 +-
.../lucene/benchmark/stats/TimeData.java | 4 +-
.../benchmark/utils/ExtractReuters.java | 4 +-
.../benchmark/utils/ExtractWikipedia.java | 2 +-
.../benchmark/byTask/TestPerfTasksLogic.java | 5 +-
.../benchmark/byTask/TestPerfTasksParse.java | 2 -
.../benchmark/quality/TestQualityRun.java | 3 -
.../apache/lucene/store/je/JEStoreTest.java | 33 ++-------
.../org/apache/lucene/store/db/Block.java | 2 -
.../apache/lucene/store/db/DbDirectory.java | 1 -
.../apache/lucene/store/db/DbStoreTest.java | 30 ++------
.../lucene/store/db/SanityLoadLibrary.java | 2 +-
.../search/vectorhighlight/FieldFragList.java | 2 -
.../vectorhighlight/FieldTermStack.java | 2 +-
.../vectorhighlight/AbstractTestCase.java | 19 ++---
.../SimpleFragmentsBuilderTest.java | 1 -
.../highlight/HighlighterPhraseTest.java | 22 +++---
.../search/highlight/HighlighterTest.java | 53 +++++++-------
.../collation/TestICUCollationKeyFilter.java | 6 +-
.../instantiated/InstantiatedDocument.java | 2 +-
.../store/instantiated/InstantiatedIndex.java | 3 +-
.../instantiated/InstantiatedIndexReader.java | 9 +--
.../instantiated/InstantiatedIndexWriter.java | 6 +-
.../store/instantiated/InstantiatedTerm.java | 6 +-
.../instantiated/InstantiatedTermEnum.java | 6 +-
.../store/instantiated/TestIndicesEquals.java | 11 ++-
.../store/instantiated/TestSerialization.java | 7 +-
.../TestUnoptimizedReaderOnConstructor.java | 17 ++---
.../lucene/index/memory/MemoryIndex.java | 36 +++++-----
.../lucene/index/memory/MemoryIndexTest.java | 20 +++---
.../index/BalancedSegmentMergePolicy.java | 2 +-
.../org/apache/lucene/misc/ChainedFilter.java | 1 -
.../lucene/index/TestFieldNormModifier.java | 12 ++--
.../lucene/index/TestIndexSplitter.java | 3 +-
.../index/TestMultiPassIndexSplitter.java | 11 ++-
.../lucene/index/TestTermVectorAccessor.java | 8 +--
.../apache/lucene/misc/ChainedFilterTest.java | 13 ++--
.../lucene/misc/SweetSpotSimilarityTest.java | 24 +------
.../lucene/misc/TestLengthNormModifier.java | 12 ++--
.../analyzing/TestAnalyzingQueryParser.java | 15 ++--
.../complexPhrase/TestComplexPhraseQuery.java | 14 ++--
.../ext/TestExtendableQueryParser.java | 7 +-
.../precedence/TestPrecedenceQueryParser.java | 32 ++++-----
.../lucene/search/BooleanFilterTest.java | 13 ++--
.../lucene/search/DuplicateFilterTest.java | 16 ++---
.../lucene/search/FuzzyLikeThisQueryTest.java | 15 ++--
.../apache/lucene/search/TermsFilterTest.java | 10 ++-
.../search/similar/TestMoreLikeThis.java | 6 +-
.../core/config/QueryConfigHandler.java | 2 +-
.../MultiFieldQueryParserWrapper.java | 4 +-
.../standard/QueryParserWrapper.java | 2 +-
.../standard/config/BoostAttributeImpl.java | 2 +-
.../DefaultPhraseSlopAttributeImpl.java | 1 -
.../config/FieldBoostMapAttributeImpl.java | 2 +-
.../FieldDateResolutionMapAttributeImpl.java | 2 +-
.../standard/config/FuzzyAttributeImpl.java | 2 +-
.../PositionIncrementsAttributeImpl.java | 1 -
.../standard/TestMultiAnalyzerQPHelper.java | 12 ++--
.../standard/TestMultiAnalyzerWrapper.java | 12 ++--
.../standard/TestMultiFieldQPHelper.java | 42 ++++++-----
.../TestMultiFieldQueryParserWrapper.java | 41 +++++------
.../queryParser/standard/TestQPHelper.java | 54 +++++++-------
.../standard/TestQueryParserWrapper.java | 58 ++++++++-------
.../lucene/search/regex/SpanRegexQuery.java | 3 +-
.../lucene/search/regex/TestRegexQuery.java | 19 +++--
.../search/regex/TestSpanRegexQuery.java | 13 ++--
.../RemoteCachingWrapperFilterHelper.java | 9 ++-
.../TestRemoteCachingWrapperFilter.java | 3 +-
.../lucene/search/TestRemoteSearchable.java | 7 +-
.../apache/lucene/search/TestRemoteSort.java | 21 +++---
.../tier/DistanceFieldComparatorSource.java | 7 +-
.../lucene/spatial/tier/DistanceHandler.java | 2 +-
.../lucene/spatial/tier/TestCartesian.java | 22 ++----
.../lucene/spatial/tier/TestDistance.java | 19 ++---
.../search/spell/JaroWinklerDistance.java | 2 +-
.../lucene/search/spell/NGramDistance.java | 2 +-
.../search/spell/TestLuceneDictionary.java | 15 ++--
.../lucene/search/spell/TestSpellChecker.java | 25 +++----
.../surround/query/DistanceQuery.java | 4 +-
.../surround/query/FieldsQuery.java | 1 -
.../surround/query/SimpleTerm.java | 2 +-
.../surround/query/SpanNearClauseFactory.java | 10 +--
.../surround/query/BooleanQueryTst.java | 9 +--
.../lucene/swing/models/BaseListModel.java | 4 +-
.../lucene/swing/models/BaseTableModel.java | 10 +--
.../apache/lucene/swing/models/DataStore.java | 4 +-
.../lucene/swing/models/TestBasicList.java | 4 +-
.../lucene/swing/models/TestBasicTable.java | 4 +-
.../analysis/WikipediaTokenizer.java | 1 -
.../analysis/WikipediaTokenizerTest.java | 8 +--
.../apache/lucene/wordnet/AnalyzerUtil.java | 6 +-
.../org/apache/lucene/wordnet/SynExpand.java | 2 +-
.../org/apache/lucene/wordnet/SynonymMap.java | 10 +--
.../wordnet/TestSynonymTokenFilter.java | 12 ++--
.../xmlparser/QueryTemplateManager.java | 1 -
.../builders/BoostingTermBuilder.java | 1 -
.../builders/DuplicateFilterBuilder.java | 5 --
.../xmlparser/TestQueryTemplateManager.java | 8 +--
.../org/apache/lucene/demo/IndexFiles.java | 1 -
.../org/apache/lucene/demo/html/Entities.java | 10 ++-
.../apache/lucene/analysis/CharArrayMap.java | 2 +-
.../apache/lucene/analysis/CharArraySet.java | 1 -
.../lucene/analysis/NumericTokenStream.java | 6 +-
.../apache/lucene/analysis/StopAnalyzer.java | 10 +--
.../lucene/analysis/TeeSinkTokenFilter.java | 2 +-
.../org/apache/lucene/analysis/Token.java | 2 +-
.../analysis/standard/StandardAnalyzer.java | 2 +-
.../analysis/standard/StandardTokenizer.java | 1 -
.../tokenattributes/TermAttributeImpl.java | 2 +-
.../apache/lucene/document/AbstractField.java | 3 +-
.../apache/lucene/document/NumericField.java | 14 ++--
.../org/apache/lucene/index/CheckIndex.java | 2 +-
.../index/ConcurrentMergeScheduler.java | 8 +--
.../lucene/index/DocFieldConsumers.java | 14 ++--
.../index/DocFieldConsumersPerThread.java | 4 +-
.../org/apache/lucene/index/FieldsReader.java | 6 +-
.../org/apache/lucene/index/IndexReader.java | 4 +-
.../org/apache/lucene/index/IndexWriter.java | 2 +-
.../org/apache/lucene/index/MultiReader.java | 2 -
src/java/org/apache/lucene/index/Payload.java | 2 +-
.../lucene/index/ReusableStringReader.java | 2 +-
.../org/apache/lucene/index/SegmentInfo.java | 2 +-
.../org/apache/lucene/index/SegmentInfos.java | 4 +-
.../lucene/index/StoredFieldsWriter.java | 1 -
.../lucene/index/TermVectorsReader.java | 2 +-
.../lucene/index/TermVectorsTermsWriter.java | 14 ++--
.../index/TermVectorsTermsWriterPerField.java | 10 +--
.../lucene/index/TermVectorsWriter.java | 2 +-
.../org/apache/lucene/index/TermsHash.java | 2 +-
.../lucene/index/TermsHashPerField.java | 2 +-
.../lucene/search/DisjunctionMaxScorer.java | 2 +-
.../lucene/search/DisjunctionSumScorer.java | 4 +-
.../lucene/search/DocIdSetIterator.java | 2 -
.../org/apache/lucene/search/FieldCache.java | 2 +-
.../apache/lucene/search/FieldCacheImpl.java | 16 ++---
.../apache/lucene/search/FieldComparator.java | 24 +++----
.../lucene/search/FieldValueHitQueue.java | 2 +-
.../search/FilteredDocIdSetIterator.java | 2 +-
.../apache/lucene/search/IndexSearcher.java | 10 +--
.../apache/lucene/search/ReqExclScorer.java | 2 +-
.../apache/lucene/search/ReqOptSumScorer.java | 2 +-
.../org/apache/lucene/search/Similarity.java | 4 +-
.../search/function/ByteFieldSource.java | 7 +-
.../search/function/CustomScoreProvider.java | 1 -
.../search/function/CustomScoreQuery.java | 2 -
.../lucene/search/function/DocValues.java | 2 +-
.../search/function/FloatFieldSource.java | 5 +-
.../search/function/IntFieldSource.java | 7 +-
.../search/function/OrdFieldSource.java | 2 +-
.../function/ReverseOrdFieldSource.java | 2 +-
.../search/function/ShortFieldSource.java | 7 +-
.../search/function/ValueSourceQuery.java | 4 +-
.../org/apache/lucene/store/IndexOutput.java | 6 +-
.../store/LockObtainFailedException.java | 2 +-
.../store/LockReleaseFailedException.java | 2 +-
.../apache/lucene/store/MMapDirectory.java | 4 +-
.../apache/lucene/store/NoLockFactory.java | 4 +-
.../lucene/store/SimpleFSLockFactory.java | 2 +-
.../store/SingleInstanceLockFactory.java | 2 +-
.../lucene/util/AverageGuessMemoryModel.java | 4 +-
.../util/IndexableBinaryStringTools.java | 2 +-
.../org/apache/lucene/util/MemoryModel.java | 2 +-
.../org/apache/lucene/util/NumericUtils.java | 10 +--
.../org/apache/lucene/util/OpenBitSet.java | 2 +-
.../org/apache/lucene/util/Parameter.java | 7 +-
.../apache/lucene/util/RamUsageEstimator.java | 4 +-
.../lucene/TestMergeSchedulerExternal.java | 11 +--
.../analysis/TestASCIIFoldingFilter.java | 2 +-
.../apache/lucene/analysis/TestAnalyzers.java | 1 +
.../lucene/analysis/TestCharArrayMap.java | 3 +-
.../lucene/analysis/TestCharArraySet.java | 5 +-
.../lucene/analysis/TestCharTokenizers.java | 8 +--
.../lucene/analysis/TestKeywordAnalyzer.java | 2 +-
.../analysis/TestMappingCharFilter.java | 2 +-
.../org/apache/lucene/analysis/TestToken.java | 1 -
.../lucene/collation/CollationTestBase.java | 11 +--
.../collation/TestCollationKeyFilter.java | 6 +-
.../lucene/document/TestBinaryDocument.java | 7 +-
.../apache/lucene/document/TestDocument.java | 7 +-
.../org/apache/lucene/index/DocHelper.java | 2 +-
.../apache/lucene/index/TestAtomicUpdate.java | 2 -
.../index/TestBackwardsCompatibility.java | 8 +--
.../apache/lucene/index/TestCompoundFile.java | 14 ++--
.../lucene/index/TestDirectoryReader.java | 7 --
src/test/org/apache/lucene/index/TestDoc.java | 2 +-
.../apache/lucene/index/TestFieldsReader.java | 7 +-
.../apache/lucene/index/TestIndexReader.java | 7 +-
.../index/TestIndexReaderCloneNorms.java | 5 --
.../apache/lucene/index/TestIndexWriter.java | 16 ++---
.../index/TestIndexWriterLockRelease.java | 11 ++-
.../lucene/index/TestIndexWriterReader.java | 70 +------------------
.../apache/lucene/index/TestMultiReader.java | 1 -
.../org/apache/lucene/index/TestNorms.java | 12 ++--
.../lucene/index/TestParallelTermEnum.java | 3 +-
.../org/apache/lucene/index/TestPayloads.java | 7 +-
.../lucene/index/TestSegmentReader.java | 2 +-
.../lucene/index/TestSegmentTermDocs.java | 3 -
.../lucene/index/TestStressIndexing.java | 1 -
.../lucene/index/TestStressIndexing2.java | 4 +-
.../lucene/index/TestTermVectorsReader.java | 7 --
.../lucene/queryParser/TestQueryParser.java | 6 +-
.../search/CachingWrapperFilterHelper.java | 9 ++-
.../org/apache/lucene/search/CheckHits.java | 6 +-
.../lucene/search/JustCompileSearch.java | 2 +-
.../apache/lucene/search/TestBoolean2.java | 5 +-
.../search/TestBooleanMinShouldMatch.java | 9 ++-
.../search/TestComplexExplanations.java | 2 +-
.../lucene/search/TestCustomSearcherSort.java | 2 +-
.../apache/lucene/search/TestDateSort.java | 2 +-
.../search/TestDisjunctionMaxQuery.java | 2 +-
.../apache/lucene/search/TestDocIdSet.java | 5 +-
.../search/TestElevationComparator.java | 5 +-
.../lucene/search/TestExplanations.java | 6 +-
.../apache/lucene/search/TestFieldCache.java | 4 +-
.../search/TestFieldCacheRangeFilter.java | 4 +-
.../lucene/search/TestFilteredQuery.java | 5 +-
.../search/TestMultiTermConstantScore.java | 2 +-
.../search/TestMultiThreadTermVectors.java | 2 +-
.../apache/lucene/search/TestPhraseQuery.java | 12 ++--
.../lucene/search/TestQueryTermVector.java | 2 -
.../apache/lucene/search/TestScorerPerf.java | 2 +-
.../org/apache/lucene/search/TestSort.java | 17 ++---
.../lucene/search/TestTermRangeFilter.java | 2 -
.../lucene/search/TestTermRangeQuery.java | 2 +-
.../apache/lucene/search/TestTermVectors.java | 3 +-
.../apache/lucene/search/TestThreadSafe.java | 5 +-
.../search/TestTimeLimitingCollector.java | 2 +-
.../search/function/TestCustomScoreQuery.java | 5 +-
.../search/function/TestFieldScoreQuery.java | 1 -
.../lucene/search/function/TestOrdValues.java | 1 -
.../search/payloads/TestPayloadNearQuery.java | 2 +-
.../lucene/search/spans/TestBasics.java | 2 +-
.../spans/TestFieldMaskingSpanQuery.java | 6 +-
.../search/spans/TestNearSpansOrdered.java | 6 +-
.../apache/lucene/search/spans/TestSpans.java | 2 +-
.../search/spans/TestSpansAdvanced.java | 5 +-
.../lucene/store/TestBufferedIndexInput.java | 4 +-
.../apache/lucene/store/TestLockFactory.java | 37 +++++-----
.../apache/lucene/store/TestRAMDirectory.java | 6 +-
.../apache/lucene/store/TestWindowsMMap.java | 2 +-
.../apache/lucene/util/LuceneTestCase.java | 5 +-
.../apache/lucene/util/LuceneTestCaseJ4.java | 17 +++--
.../org/apache/lucene/util/TestBitVector.java | 1 -
.../util/TestFieldCacheSanityChecker.java | 44 ++++--------
.../util/TestIndexableBinaryStringTools.java | 4 +-
.../lucene/util/TestRamUsageEstimator.java | 14 ++--
.../apache/lucene/util/TestStringIntern.java | 10 +--
.../apache/lucene/util/TestVirtualMethod.java | 9 ++-
.../util/cache/TestDoubleBarrelLRUCache.java | 2 +-
356 files changed, 1280 insertions(+), 1731 deletions(-)
diff --git a/CHANGES.txt b/CHANGES.txt
index 75dfc65bc80..dce16951d62 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -186,7 +186,9 @@ Optimizations
* LUCENE-2195: Speedup CharArraySet if set is empty.
(Simon Willnauer via Robert Muir)
-
+
+* LUCENE-2285: Code cleanup. (Shai Erera via Uwe Schindler)
+
Build
* LUCENE-2124: Moved the JDK-based collation support from contrib/collation
@@ -209,10 +211,11 @@ Test Cases
* LUCENE-2170: Fix thread starvation problems. (Uwe Schindler)
-* LUCENE-2248, LUCENE-2251: Refactor tests to not use Version.LUCENE_CURRENT,
- but instead use a global static value from LuceneTestCase(J4), that
- contains the release version. (Uwe Schindler, Simon Willnauer)
-
+* LUCENE-2248, LUCENE-2251, LUCENE-2285: Refactor tests to not use
+ Version.LUCENE_CURRENT, but instead use a global static value
+ from LuceneTestCase(J4), that contains the release version.
+ (Uwe Schindler, Simon Willnauer, Shai Erera)
+
================== Release 2.9.2 / 3.0.1 2010-02-26 ====================
Changes in backwards compatibility policy
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
index 4ed6c7259a5..a1ff78faa0f 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
@@ -25,7 +25,6 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
@@ -162,14 +161,16 @@ public final class ArabicAnalyzer extends StopwordAnalyzerBase {
this(matchVersion, WordlistLoader.getWordSet( stopwords, STOPWORDS_COMMENT));
}
-
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from an {@link ArabicLetterTokenizer} filtered with
- * {@link LowerCaseFilter}, {@link StopFilter}, {@link ArabicNormalizationFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided
- * and {@link ArabicStemFilter}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link ArabicLetterTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link ArabicNormalizationFilter}, {@link KeywordMarkerTokenFilter}
+ * if a stem exclusion set is provided and {@link ArabicStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java
index 9df60a51435..242db59005a 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java
@@ -24,7 +24,6 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
@@ -117,15 +116,18 @@ public final class BulgarianAnalyzer extends StopwordAnalyzerBase {
super(matchVersion, stopwords);
this.stemExclusionSet = CharArraySet.unmodifiableSet(CharArraySet.copy(
        matchVersion, stemExclusionSet));
  }
-
+
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link BulgarianStemFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link BulgarianStemFilter}.
*/
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
index fc064f33bb1..2cdd0c769f2 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
@@ -29,7 +29,6 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
@@ -191,12 +190,16 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
excltable = WordlistLoader.getWordSet( exclusionlist );
setPreviousTokenStream(null); // force a new stemmer to be created
}
+
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a {@link StandardTokenizer} filtered with
- * {@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter}, and
- * {@link BrazilianStemFilter}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter}
+ * , and {@link BrazilianStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
index 896691a9678..54104c52ac6 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.cjk;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.Tokenizer;
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java
index cb4475536a7..d1f9387b8d1 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.cn;
import java.io.Reader;
import org.apache.lucene.analysis.ReusableAnalyzerBase;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.standard.StandardAnalyzer; // javadoc @link
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
@@ -35,11 +34,13 @@ import org.apache.lucene.analysis.Tokenizer;
public final class ChineseAnalyzer extends ReusableAnalyzerBase {
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a
- * {@link ChineseTokenizer} filtered with {@link ChineseFilter}
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link ChineseTokenizer} filtered with
+ * {@link ChineseFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java
index e9afe2c4870..077226548c8 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java
@@ -113,7 +113,7 @@ public class HyphenationCompoundWordTokenFilter extends
* strings.
*/
public HyphenationCompoundWordTokenFilter(Version matchVersion, TokenStream input,
- HyphenationTree hyphenator, Set dictionary) {
+      HyphenationTree hyphenator, Set<?> dictionary) {
this(input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
}
@@ -145,7 +145,7 @@ public class HyphenationCompoundWordTokenFilter extends
* Add only the longest matching subword to the stream
*/
public HyphenationCompoundWordTokenFilter(Version matchVersion, TokenStream input,
- HyphenationTree hyphenator, Set dictionary, int minWordSize,
+      HyphenationTree hyphenator, Set<?> dictionary, int minWordSize,
int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
super(matchVersion, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize,
onlyLongestMatch);
@@ -201,7 +201,7 @@ public class HyphenationCompoundWordTokenFilter extends
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
- HyphenationTree hyphenator, Set dictionary) {
+      HyphenationTree hyphenator, Set<?> dictionary) {
this(Version.LUCENE_30, input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
}
@@ -223,7 +223,7 @@ public class HyphenationCompoundWordTokenFilter extends
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
- HyphenationTree hyphenator, Set dictionary, int minWordSize,
+      HyphenationTree hyphenator, Set<?> dictionary, int minWordSize,
int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
super(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize,
onlyLongestMatch);
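The four constructor changes above replace the raw Set with Set<?>: the
dictionary is only read, never written, so the unbounded wildcard removes
the raw-type warning without constraining callers. A self-contained sketch
of why the wildcard suits a read-only parameter (the class and names here
are invented for illustration):

    import java.util.HashSet;
    import java.util.Set;

    class WildcardDemo {
      // A raw 'Set' parameter compiles but draws a raw-type warning;
      // 'Set<?>' is warning-free and still accepts a Set of any element type.
      static int dictionarySize(Set<?> dictionary) {
        return dictionary == null ? 0 : dictionary.size(); // read-only use is fine
      }

      public static void main(String[] args) {
        Set<String> words = new HashSet<String>();
        words.add("Rind");
        words.add("Fleisch");
        System.out.println(dictionarySize(words)); // prints 2
      }
    }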
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java
index 380aa44a2c5..373935ab6bc 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java
@@ -83,7 +83,7 @@ public class CharVector implements Cloneable, Serializable {
@Override
public Object clone() {
- CharVector cv = new CharVector((char[]) array.clone(), blockSize);
+ CharVector cv = new CharVector(array.clone(), blockSize);
cv.n = this.n;
return cv;
}
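The cast dropped in clone() above is redundant because array clone() has
been covariant since Java 5: called on a char[], its result is typed
char[] directly. A short stand-alone demonstration:

    class ArrayCloneDemo {
      public static void main(String[] args) {
        char[] original = { 'a', 'b', 'c' };
        char[] copy = original.clone(); // no (char[]) cast needed since Java 5
        copy[0] = 'z';
        System.out.println(original[0] + " " + copy[0]); // prints "a z"
      }
    }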
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphenation.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphenation.java
index 7a276a8a7a2..aa9974824f6 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphenation.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphenation.java
@@ -26,11 +26,6 @@ public class Hyphenation {
private int[] hyphenPoints;
- /**
- * number of hyphenation points in word
- */
- private int len;
-
/**
* rawWord as made of alternating strings and {@link Hyphen Hyphen} instances
*/
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
index 41be1915778..c61a8d06d1b 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
@@ -44,7 +44,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer,
/**
* This map stores hyphenation exceptions
*/
- protected HashMap stoplist;
+  protected HashMap<String,ArrayList<Object>> stoplist;
/**
* This map stores the character classes
@@ -57,7 +57,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer,
private transient TernaryTree ivalues;
public HyphenationTree() {
- stoplist = new HashMap(23); // usually a small table
+    stoplist = new HashMap<String,ArrayList<Object>>(23); // usually a small table
classmap = new TernaryTree();
vspace = new ByteVector();
vspace.alloc(1); // this reserves index 0, which we don't use
@@ -363,7 +363,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer,
if (stoplist.containsKey(sw)) {
// assume only simple hyphens (Hyphen.pre="-", Hyphen.post = Hyphen.no =
// null)
- ArrayList hw = stoplist.get(sw);
+      ArrayList<Object> hw = stoplist.get(sw);
int j = 0;
for (i = 0; i < hw.size(); i++) {
Object o = hw.get(i);
@@ -443,7 +443,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer,
* @param hyphenatedword a vector of alternating strings and
* {@link Hyphen hyphen} objects.
*/
- public void addException(String word, ArrayList hyphenatedword) {
+  public void addException(String word, ArrayList<Object> hyphenatedword) {
stoplist.put(word, hyphenatedword);
}
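With stoplist generified to HashMap<String,ArrayList<Object>>, the get()
in the hunk above needs no cast. The element type stays Object because each
entry mixes String fragments with Hyphen markers. A stand-alone sketch of
the access pattern (stand-in values, not the real Lucene classes):

    import java.util.ArrayList;
    import java.util.HashMap;

    class StoplistDemo {
      public static void main(String[] args) {
        // Entries alternate String fragments and marker objects, so Object
        // is the tightest element type the map can declare.
        HashMap<String, ArrayList<Object>> stoplist =
            new HashMap<String, ArrayList<Object>>(23);

        ArrayList<Object> entry = new ArrayList<Object>();
        entry.add("hy");
        entry.add(Character.valueOf('-')); // stand-in for a Hyphen marker
        entry.add("phen");
        stoplist.put("hyphen", entry);

        ArrayList<Object> hw = stoplist.get("hyphen"); // no cast required
        System.out.println(hw.size()); // prints 3
      }
    }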
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternConsumer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternConsumer.java
index 243f2487811..e20126f3f6f 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternConsumer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternConsumer.java
@@ -42,7 +42,7 @@ public interface PatternConsumer {
* his own hyphenation. A hyphenatedword is a vector of alternating String's
* and {@link Hyphen Hyphen} instances
*/
- void addException(String word, ArrayList hyphenatedword);
+  void addException(String word, ArrayList<Object> hyphenatedword);
/**
* Add hyphenation patterns.
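The interface change above has to move in lockstep with its implementers:
once PatternConsumer names ArrayList<Object>, the matching methods in
HyphenationTree (above) and PatternParser (below) must be updated the same
way. A toy illustration of the override rule involved (names invented):

    import java.util.ArrayList;

    interface ExceptionConsumer {
      void addException(String word, ArrayList<Object> hyphenatedWord);
    }

    class Printer implements ExceptionConsumer {
      // The parameter type must repeat ArrayList<Object>; a raw 'ArrayList'
      // here would still override via erasure, but only with a raw-type
      // warning, which is exactly the kind of warning this patch removes.
      public void addException(String word, ArrayList<Object> hyphenatedWord) {
        System.out.println(word + " : " + hyphenatedWord);
      }

      public static void main(String[] args) {
        ArrayList<Object> parts = new ArrayList<Object>();
        parts.add("hy");
        parts.add("phen");
        new Printer().addException("hyphen", parts);
      }
    }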
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
index 9efaefe5bcf..bf04b3a3697 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
@@ -51,7 +51,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
StringBuilder token;
- ArrayList exception;
+  ArrayList<Object> exception;
char hyphenChar;
@@ -199,8 +199,8 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
return pat.toString();
}
- protected ArrayList normalizeException(ArrayList ex) {
- ArrayList res = new ArrayList();
+  protected ArrayList<Object> normalizeException(ArrayList<?> ex) {
+    ArrayList<Object> res = new ArrayList<Object>();
for (int i = 0; i < ex.size(); i++) {
Object item = ex.get(i);
if (item instanceof String) {
@@ -230,7 +230,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
return res;
}
- protected String getExceptionWord(ArrayList ex) {
+  protected String getExceptionWord(ArrayList<?> ex) {
StringBuilder res = new StringBuilder();
for (int i = 0; i < ex.size(); i++) {
Object item = ex.get(i);
@@ -291,7 +291,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
currElement = ELEM_PATTERNS;
} else if (local.equals("exceptions")) {
currElement = ELEM_EXCEPTIONS;
- exception = new ArrayList();
+      exception = new ArrayList<Object>();
} else if (local.equals("hyphen")) {
if (token.length() > 0) {
exception.add(token.toString());
@@ -308,6 +308,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
* java.lang.String, java.lang.String)
*/
@Override
+ @SuppressWarnings("unchecked")
public void endElement(String uri, String local, String raw) {
if (token.length() > 0) {
@@ -319,7 +320,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
case ELEM_EXCEPTIONS:
exception.add(word);
exception = normalizeException(exception);
- consumer.addException(getExceptionWord(exception),
+ consumer.addException(getExceptionWord(exception),
(ArrayList) exception.clone());
break;
case ELEM_PATTERNS:
@@ -344,6 +345,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
/**
* @see org.xml.sax.ContentHandler#characters(char[], int, int)
*/
+ @SuppressWarnings("unchecked")
@Override
public void characters(char ch[], int start, int length) {
StringBuffer chars = new StringBuffer(length);
@@ -428,7 +430,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
System.out.println("class: " + c);
}
- public void addException(String w, ArrayList e) {
+  public void addException(String w, ArrayList<Object> e) {
System.out.println("exception: " + w + " : " + e.toString());
}
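The two @SuppressWarnings("unchecked") annotations added to PatternParser
sit on individual methods, the narrowest scope Java offers short of a local
declaration, so the unchecked operations they cover cannot hide new
warnings elsewhere in the class. A self-contained sketch of the same
trade-off (invented names):

    import java.util.ArrayList;

    class SuppressDemo {
      // ArrayList.clone() returns Object, so narrowing it back to a
      // parameterized list is inherently unchecked; suppressing on just
      // this method keeps the rest of the class honest.
      @SuppressWarnings("unchecked")
      static ArrayList<Object> copyOf(ArrayList<Object> src) {
        return (ArrayList<Object>) src.clone();
      }

      public static void main(String[] args) {
        ArrayList<Object> a = new ArrayList<Object>();
        a.add("x");
        ArrayList<Object> b = copyOf(a);
        b.add("y");
        System.out.println(a.size() + " " + b.size()); // prints "1 2"
      }
    }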
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
index 59a22683dd2..b3c3d36e865 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
@@ -351,10 +351,10 @@ public class TernaryTree implements Cloneable, Serializable {
@Override
public Object clone() {
TernaryTree t = new TernaryTree();
- t.lo = (char[]) this.lo.clone();
- t.hi = (char[]) this.hi.clone();
- t.eq = (char[]) this.eq.clone();
- t.sc = (char[]) this.sc.clone();
+ t.lo = this.lo.clone();
+ t.hi = this.hi.clone();
+ t.eq = this.eq.clone();
+ t.sc = this.sc.clone();
t.kv = (CharVector) this.kv.clone();
t.root = this.root;
t.freenode = this.freenode;
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
index da99eb19e35..54687fce679 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.cz;
*/
import org.apache.lucene.analysis.ReusableAnalyzerBase;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
@@ -216,16 +215,20 @@ public final class CzechAnalyzer extends ReusableAnalyzerBase {
stoptable = Collections.emptySet();
}
}
+
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, and {@link CzechStemFilter} (only if version is
- * >= LUCENE_31). If a version is >= LUCENE_31 and a stem exclusion set
- * is provided via {@link #CzechAnalyzer(Version, Set, Set)} a
- * {@link KeywordMarkerTokenFilter} is added before {@link CzechStemFilter}.
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , and {@link CzechStemFilter} (only if version is >= LUCENE_31). If
+ * a version is >= LUCENE_31 and a stem exclusion set is provided via
+ * {@link #CzechAnalyzer(Version, Set, Set)} a
+ * {@link KeywordMarkerTokenFilter} is added before
+ * {@link CzechStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java
index 67fdd59f112..d52fba8a59b 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class DanishAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
index 50c4eca70ca..ffa7900ea6d 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
@@ -29,7 +29,6 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
@@ -222,16 +221,17 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
exclusionSet = WordlistLoader.getWordSet(exclusionlist);
setPreviousTokenStream(null); // force a new stemmer to be created
}
-
+
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link LowerCaseFilter}, {@link StopFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided, and
- * {@link SnowballFilter}
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided, and {@link SnowballFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
index 076fd529e15..477881b005f 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.el;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
@@ -120,15 +119,17 @@ public final class GreekAnalyzer extends StopwordAnalyzerBase
{
this(matchVersion, stopwords.keySet());
}
-
- /**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with
- * {@link GreekLowerCaseFilter}, {@link StandardFilter} and {@link StopFilter}
- */
+
+ /**
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link GreekLowerCaseFilter}, {@link StandardFilter} and
+ * {@link StopFilter}
+ */
@Override
protected TokenStreamComponents createComponents(String fieldName,
Reader reader) {
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java
index c0d0adc3bfb..df6f1b7e912 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java
@@ -29,7 +29,6 @@ import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -90,13 +89,16 @@ public final class EnglishAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link PorterStemFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link PorterStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/es/SpanishAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/es/SpanishAnalyzer.java
index d3e0de07ccb..d4322ef36a1 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/es/SpanishAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/es/SpanishAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class SpanishAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java
index a5331f19582..62f1795ceed 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java
@@ -25,7 +25,6 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
@@ -136,12 +135,13 @@ public final class PersianAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a {@link ArabicLetterTokenizer}
- * filtered with {@link LowerCaseFilter},
- * {@link ArabicNormalizationFilter},
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link ArabicLetterTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link ArabicNormalizationFilter},
* {@link PersianNormalizationFilter} and Persian Stop words
*/
@Override
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java
index 8e9a22b8042..ae9eb1d69e4 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class FinnishAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
index c4079750215..6d7c2acb331 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.fr;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
@@ -225,14 +224,16 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link ElisionFilter},
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link ElisionFilter},
* {@link LowerCaseFilter}, {@link StopFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided,
- * and {@link SnowballFilter}
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided, and {@link SnowballFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java
index 90d7d5934a1..2cc2f916aff 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java
@@ -22,7 +22,6 @@ import java.io.Reader;
import java.util.Set;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
@@ -106,15 +105,16 @@ public final class HindiAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a {@link IndicTokenizer}
- * filtered with {@link LowerCaseFilter},
- * {@link IndicNormalizationFilter},
- * {@link HindiNormalizationFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided,
- * {@link HindiStemFilter}, and Hindi Stop words
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link IndicTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link IndicNormalizationFilter},
+ * {@link HindiNormalizationFilter}, {@link KeywordMarkerTokenFilter}
+ * if a stem exclusion set is provided, {@link HindiStemFilter}, and
+ * Hindi Stop words
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java
index e6d59a393c6..a86a20b7e05 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class HungarianAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java
index d2e639fdf4e..bb59312d776 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class ItalianAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
index efd8e0245be..4f597897698 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
@@ -311,7 +311,7 @@ public final class PatternAnalyzer extends Analyzer {
return new String(output, 0, len);
} finally {
- if (input != null) input.close();
+ input.close();
}
}
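The PatternAnalyzer hunk above deletes a null guard in a finally block. The reader is dereferenced inside the try before finally can run, so it can never be null at the close() call; the guard was dead code of the kind Eclipse flags. A minimal sketch with a hypothetical helper:

    import java.io.IOException;
    import java.io.Reader;

    final class ReadAll {
      static String drain(Reader input) throws IOException {
        StringBuilder sb = new StringBuilder();
        char[] buf = new char[1024];
        try {
          // input is dereferenced here; if it were null we would have
          // thrown long before reaching the finally block below.
          for (int n; (n = input.read(buf)) != -1; ) {
            sb.append(buf, 0, n);
          }
          return sb.toString();
        } finally {
          input.close(); // was: if (input != null) input.close();
        }
      }
    }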
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
index fd4c65d3d97..fa85dd8ec93 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
@@ -124,7 +124,7 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = (char[]) termAtt.termBuffer().clone();
+ curTermBuffer = termAtt.termBuffer().clone();
curTermLength = termAtt.termLength();
curGramSize = minGram;
tokStart = offsetAtt.startOffset();
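The cast dropped here (and again in NGramTokenFilter just below) has been redundant since Java 5: clone() on an array has a covariant return type, so char[].clone() is already typed char[] rather than Object. A stand-alone check:

    public class ArrayCloneDemo {
      public static void main(String[] args) {
        char[] term = {'l', 'u', 'c', 'e', 'n', 'e'};
        char[] copy = term.clone(); // typed char[] since Java 5; no cast needed
        System.out.println(copy != term);     // true: a fresh array
        System.out.println(new String(copy)); // "lucene"
      }
    }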
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
index 6fd4b7c09d5..41b956357ac 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
@@ -79,7 +79,7 @@ public final class NGramTokenFilter extends TokenFilter {
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = (char[]) termAtt.termBuffer().clone();
+ curTermBuffer = termAtt.termBuffer().clone();
curTermLength = termAtt.termLength();
curGramSize = minGram;
curPos = 0;
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java
index b455cfbde7a..836d2733dba 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class NorwegianAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java
index 990559bbefd..c1e1c7bab80 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class PortugueseAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
index 83a6067bbcd..3e454634ce5 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
@@ -192,7 +192,7 @@ public final class QueryAutoStopWordAnalyzer extends Analyzer {
* if there are stopwords, it is a StopFilter around the wrapped stream.
*/
TokenStream withStopFilter;
- };
+ }
@Override
public TokenStream reusableTokenStream(String fieldName, Reader reader)
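The "};" to "}" change removes an empty declaration: a semicolon after a type body is legal Java but means nothing, and Eclipse reports it as unnecessary. Sketch (names hypothetical, mirroring the per-field holder above):

    public class Outer {
      static class PerFieldHolder {
        Object withStopFilter;
      }   // writing "};" here would also compile, but the trailing ';'
          // is an empty declaration the compiler simply discards
    }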
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java
index c5f28f6c3d3..45fcf7b2818 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java
@@ -29,7 +29,6 @@ import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -110,13 +109,16 @@ public final class RomanianAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
index 934da579172..34b98035639 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
@@ -26,7 +26,6 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -160,16 +159,17 @@ public final class RussianAnalyzer extends StopwordAnalyzerBase
this(matchVersion, stopwords.keySet());
}
- /**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link LowerCaseFilter}, {@link StopFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided,
- * and {@link SnowballFilter}
- */
+ /**
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided, and {@link SnowballFilter}
+ */
@Override
protected TokenStreamComponents createComponents(String fieldName,
Reader reader) {
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java
index f3a70a26133..fea9e2121fe 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java
@@ -26,7 +26,7 @@ package org.apache.lucene.analysis.ru;
class RussianStemmer
{
// positions of RV, R1 and R2 respectively
- private int RV, R1, R2;
+ private int RV, /*R1,*/ R2;
// letters (currently unused letters are commented out)
private final static char A = '\u0430';
@@ -263,11 +263,7 @@ class RussianStemmer
if (!findAndRemoveEnding(stemmingZone, adjectiveEndings))
return false;
// if adjective ending was found, try for participle ending.
- // variable r is unused, we are just interested in the side effect of
- // findAndRemoveEnding():
- boolean r =
- findAndRemoveEnding(stemmingZone, participleEndings1, participle1Predessors)
- ||
+ if (!findAndRemoveEnding(stemmingZone, participleEndings1, participle1Predessors))
findAndRemoveEnding(stemmingZone, participleEndings2);
return true;
}
@@ -391,7 +387,7 @@ class RussianStemmer
private void markPositions(String word)
{
RV = 0;
- R1 = 0;
+// R1 = 0;
R2 = 0;
int i = 0;
// find RV
@@ -409,7 +405,7 @@ class RussianStemmer
}
if (word.length() - 1 < ++i)
return; // R1 zone is empty
- R1 = i;
+// R1 = i;
// find R2
while (word.length() > i && !isVowel(word.charAt(i)))
{
@@ -532,13 +528,9 @@ class RussianStemmer
if (!perfectiveGerund(stemmingZone))
{
reflexive(stemmingZone);
- // variable r is unused, we are just interested in the flow that gets
- // created by logical expression: apply adjectival(); if that fails,
- // apply verb() etc
- boolean r =
- adjectival(stemmingZone)
- || verb(stemmingZone)
- || noun(stemmingZone);
+ if (!adjectival(stemmingZone))
+ if (!verb(stemmingZone))
+ noun(stemmingZone);
}
// Step 2
removeI(stemmingZone);
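Both RussianStemmer hunks retire the same idiom: a boolean local assigned only to force evaluation of a short-circuiting ||, leaving an unused variable behind. The replacement keeps the exact evaluation order (each stage runs only if the previous one failed) with explicit control flow. A self-contained sketch with stand-in methods (the endings are illustrative, not the stemmer's real tables):

    final class StemSteps {
      // Stand-ins for adjectival()/verb()/noun(): each returns true when it
      // found and removed an ending from the stemming zone.
      static boolean adjectival(StringBuilder z) { return removeEnding(z, "ая"); }
      static boolean verb(StringBuilder z)       { return removeEnding(z, "ть"); }
      static boolean noun(StringBuilder z)       { return removeEnding(z, "а"); }

      static boolean removeEnding(StringBuilder z, String ending) {
        if (z.length() > ending.length() && z.toString().endsWith(ending)) {
          z.setLength(z.length() - ending.length());
          return true;
        }
        return false;
      }

      static void step1(StringBuilder zone) {
        // Old: boolean r = adjectival(zone) || verb(zone) || noun(zone);  // r never read
        // New (as in this patch): identical side effects, no dead local.
        if (!adjectival(zone))
          if (!verb(zone))
            noun(zone);
      }
    }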
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
index ebf789b4d4b..ad28d70b45e 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
@@ -391,8 +391,8 @@ public final class ShingleFilter extends TokenFilter {
}
/**
- * {@see #advance()}
* @return the current value.
+ * @see #advance()
*/
public int getValue() {
return value;
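The ShingleFilter fix is javadoc-only: there is no inline {@see} tag, so the old line produced a javadoc warning. @see is a block tag that must start its own line; {@link} is the inline equivalent. Illustration:

    public class SeeTagDemo {
      private int value;

      /**
       * Returns the current value; see also {@link #advance()} (inline form).
       *
       * @return the current value.
       * @see #advance()
       */
      public int getValue() { return value; }

      /** Advances to the next value. */
      public void advance() { value++; }
    }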
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java
index ec49ea1934c..ec6eee07dd4 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.EmptyTokenStream;
import org.apache.lucene.analysis.payloads.PayloadHelper;
-import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column.Row;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
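Only the import of the intermediate Matrix.Column is dropped; Matrix.Column.Row stays, because importing a nested type never requires importing its enclosing type. Once no code names Column directly, its import is dead. Sketch under hypothetical package names:

    // File a/Matrix.java
    package a;
    public class Matrix {
      public static class Column {
        public static class Row { }
      }
    }

    // File b/Uses.java
    package b;
    import a.Matrix.Column.Row;   // sufficient on its own
    // import a.Matrix.Column;    // unused unless Column itself is named
    public class Uses {
      Row row = new Row();
    }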
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
index 6e1493177e2..f3e529a8859 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.sinks;
import java.text.DateFormat;
import java.text.ParseException;
-import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkFilter;
@@ -42,7 +41,7 @@ public class DateRecognizerSinkFilter extends SinkFilter {
* Uses {@link java.text.SimpleDateFormat#getDateInstance()} as the {@link java.text.DateFormat} object.
*/
public DateRecognizerSinkFilter() {
- this(SimpleDateFormat.getDateInstance());
+ this(DateFormat.getDateInstance());
}
public DateRecognizerSinkFilter(DateFormat dateFormat) {
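getDateInstance() is declared on DateFormat, not SimpleDateFormat; calling it through the subclass compiles, since static members are inherited names, but it misleads the reader and triggers Eclipse's indirect-access-to-static-member warning. Both calls below do the same thing:

    import java.text.DateFormat;
    import java.text.SimpleDateFormat;

    public class StaticAccessDemo {
      public static void main(String[] args) {
        DateFormat viaSubclass = SimpleDateFormat.getDateInstance(); // flagged: member of DateFormat
        DateFormat direct = DateFormat.getDateInstance();            // preferred, as in the patch
        System.out.println(viaSubclass.getClass().getName());
        System.out.println(direct.getClass().getName());
      }
    }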
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java
index b22fc2c5dbf..c2efbbd00fb 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java
@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@ public final class SwedishAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
index bace03ee7d6..91997a752ae 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.th;
import java.io.Reader;
import org.apache.lucene.analysis.ReusableAnalyzerBase;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
@@ -45,12 +44,14 @@ public final class ThaiAnalyzer extends ReusableAnalyzerBase {
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link ThaiWordFilter}, and {@link StopFilter}
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link ThaiWordFilter}, and
+ * {@link StopFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
diff --git a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java
index 0d97e63f9a0..41747e0bdc2 100644
--- a/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java
+++ b/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java
@@ -28,7 +28,6 @@ import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -109,11 +108,14 @@ public final class TurkishAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link TurkishLowerCaseFilter},
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link TurkishLowerCaseFilter},
* {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
* exclusion set is provided and {@link SnowballFilter}.
*/
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
index ddc9f287ef7..45bf87528cc 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
@@ -24,7 +24,6 @@ import java.util.Set;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Analyzer
@@ -35,14 +34,14 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in the classpath */
public void testResourcesAvailable() {
- new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ new ArabicAnalyzer(TEST_VERSION_CURRENT);
}
/**
* Some simple tests showing some features of the analyzer, and how some regular forms conflate
*/
public void testBasicFeatures() throws Exception {
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "كبير", new String[] { "كبير" });
assertAnalyzesTo(a, "كبيرة", new String[] { "كبير" }); // feminine marker
@@ -63,7 +62,7 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
* Simple tests to show things are getting reset correctly, etc.
*/
public void testReusableTokenStream() throws Exception {
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "كبير", new String[] { "كبير" });
assertAnalyzesToReuse(a, "كبيرة", new String[] { "كبير" }); // feminine marker
}
@@ -72,7 +71,7 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
* Non-Arabic text gets treated much as SimpleAnalyzer would treat it.
*/
public void testEnglishInput() throws Exception {
- assertAnalyzesTo(new ArabicAnalyzer(Version.LUCENE_CURRENT), "English text.", new String[] {
+ assertAnalyzesTo(new ArabicAnalyzer(TEST_VERSION_CURRENT), "English text.", new String[] {
"english", "text" });
}
@@ -82,7 +81,7 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
public void testCustomStopwords() throws Exception {
Set set = new HashSet();
Collections.addAll(set, "the", "and", "a");
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT, set);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT, set);
assertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
}
@@ -90,12 +89,12 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
public void testWithStemExclusionSet() throws IOException {
Set set = new HashSet();
set.add("ساهدهات");
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهدهات" });
assertAnalyzesToReuse(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهدهات" });
- a = new ArabicAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, CharArraySet.EMPTY_SET);
+ a = new ArabicAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, CharArraySet.EMPTY_SET);
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهد" });
assertAnalyzesToReuse(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهد" });
}
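From this point the test changes all follow one pattern: the hard-coded Version.LUCENE_CURRENT gives way to TEST_VERSION_CURRENT, evidently a constant inherited from the shared test base class, which lets the org.apache.lucene.util.Version import disappear and centralizes which version the tests target. A sketch of the shape (the base class below and the constant's value are assumptions for illustration, not the actual LuceneTestCase source):

    import org.apache.lucene.util.Version;

    // Assumed shared base: one place to choose the version under test.
    abstract class BaseTestCaseSketch /* extends junit.framework.TestCase */ {
      protected static final Version TEST_VERSION_CURRENT = Version.LUCENE_CURRENT;
    }

    class SomeAnalyzerTest extends BaseTestCaseSketch {
      public void testResourcesAvailable() {
        // The inherited constant means no Version import in each test file.
        new org.apache.lucene.analysis.ar.ArabicAnalyzer(TEST_VERSION_CURRENT);
      }
    }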
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
index 4eda93ef879..044ce997503 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
@@ -21,11 +21,9 @@ import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Normalization Filter
- *
*/
public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {
@@ -86,7 +84,7 @@ public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader(input));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
ArabicNormalizationFilter filter = new ArabicNormalizationFilter(tokenStream);
assertTokenStreamContents(filter, new String[]{expected});
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
index e131b10c2d1..323765bf5b8 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
@@ -23,7 +23,6 @@ import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Stem Filter
@@ -116,16 +115,16 @@ public class TestArabicStemFilter extends BaseTokenStreamTestCase {
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("ساهدهات");
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader("ساهدهات"));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader("ساهدهات"));
ArabicStemFilter filter = new ArabicStemFilter(new KeywordMarkerTokenFilter(tokenStream, set));
assertTokenStreamContents(filter, new String[]{"ساهدهات"});
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader(input));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
ArabicStemFilter filter = new ArabicStemFilter(tokenStream);
assertTokenStreamContents(filter, new String[]{expected});
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
index f4665b9d993..29799d2ae08 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
@@ -34,23 +34,23 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
* This test fails with NPE when the stopwords file is missing in the classpath
*/
public void testResourcesAvailable() {
- new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ new BulgarianAnalyzer(TEST_VERSION_CURRENT);
}
public void testStopwords() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "Как се казваш?", new String[] {"казваш"});
}
public void testCustomStopwords() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT, Collections
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT, Collections
.emptySet());
assertAnalyzesTo(a, "Как се казваш?",
new String[] {"как", "се", "казваш"});
}
public void testReusableTokenStream() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "документи", new String[] {"документ"});
assertAnalyzesToReuse(a, "документ", new String[] {"документ"});
}
@@ -59,7 +59,7 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
* Test some examples from the paper
*/
public void testBasicExamples() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "енергийни кризи", new String[] {"енергийн", "криз"});
assertAnalyzesTo(a, "Атомната енергия", new String[] {"атомн", "енерг"});
@@ -72,7 +72,7 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
public void testWithStemExclusionSet() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_31, 1, true);
set.add("строеве");
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "строевете строеве", new String[] { "строй", "строеве" });
}
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
index 23215d5c58f..be21270139b 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
@@ -35,7 +35,7 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
* common (and some rare) plural pattern is listed.
*/
public void testMasculineNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// -и pattern
assertAnalyzesTo(a, "град", new String[] {"град"});
@@ -101,7 +101,7 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
* Test showing how feminine noun forms conflate
*/
public void testFeminineNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "вест", new String[] {"вест"});
assertAnalyzesTo(a, "вестта", new String[] {"вест"});
@@ -114,7 +114,7 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
* plural pattern is listed
*/
public void testNeuterNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// -а pattern
assertAnalyzesTo(a, "дърво", new String[] {"дърв"});
@@ -142,7 +142,7 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
* Test showing how adjectival forms conflate
*/
public void testAdjectives() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "красив", new String[] {"красив"});
assertAnalyzesTo(a, "красивия", new String[] {"красив"});
assertAnalyzesTo(a, "красивият", new String[] {"красив"});
@@ -158,7 +158,7 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
* Test some exceptional rules, implemented as rewrites.
*/
public void testExceptions() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// ци -> к
assertAnalyzesTo(a, "собственик", new String[] {"собственик"});
@@ -215,7 +215,7 @@ public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
public void testWithKeywordAttribute() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_31, 1, true);
set.add("строеве");
- WhitespaceTokenizer tokenStream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ WhitespaceTokenizer tokenStream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("строевете строеве"));
BulgarianStemFilter filter = new BulgarianStemFilter(
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
index 23958a8897b..a734099c611 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
@@ -25,7 +25,6 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Brazilian Stem Filter, which only modifies the term text.
@@ -128,7 +127,7 @@ public class TestBrazilianStemmer extends BaseTokenStreamTestCase {
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
checkReuse(a, "boa", "boa");
checkReuse(a, "boainain", "boainain");
checkReuse(a, "boas", "boas");
@@ -136,35 +135,35 @@ public class TestBrazilianStemmer extends BaseTokenStreamTestCase {
}
public void testStemExclusionTable() throws Exception {
- BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ BrazilianAnalyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
a.setStemExclusionTable(new String[] { "quintessência" });
checkReuse(a, "quintessência", "quintessência"); // excluded words will be completely unchanged.
}
public void testStemExclusionTableBWCompat() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader("Brasília Brasilia")), set);
+ new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader("Brasília Brasilia")), set);
assertTokenStreamContents(filter, new String[] { "brasília", "brasil" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Brasília Brasilia")), set));
assertTokenStreamContents(filter, new String[] { "brasília", "brasil" });
}
public void testWithKeywordAttributeAndExclusionTable() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
- CharArraySet set1 = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set1 = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set1.add("Brasilia");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Brasília Brasilia")), set), set1);
assertTokenStreamContents(filter, new String[] { "brasília", "brasilia" });
}
@@ -174,14 +173,14 @@ public class TestBrazilianStemmer extends BaseTokenStreamTestCase {
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ BrazilianAnalyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
checkReuse(a, "quintessência", "quintessente");
a.setStemExclusionTable(new String[] { "quintessência" });
checkReuse(a, "quintessência", "quintessência");
}
private void check(final String input, final String expected) throws Exception {
- checkOneTerm(new BrazilianAnalyzer(Version.LUCENE_CURRENT), input, expected);
+ checkOneTerm(new BrazilianAnalyzer(TEST_VERSION_CURRENT), input, expected);
}
private void checkReuse(Analyzer a, String input, String expected) throws Exception {
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
index 38945e7c9b0..18c208eb80c 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.util.Version;
public class TestCJKTokenizer extends BaseTokenStreamTestCase {
@@ -42,7 +41,7 @@ public class TestCJKTokenizer extends BaseTokenStreamTestCase {
}
public void checkCJKToken(final String str, final TestToken[] out_tokens) throws IOException {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String terms[] = new String[out_tokens.length];
int startOffsets[] = new int[out_tokens.length];
int endOffsets[] = new int[out_tokens.length];
@@ -57,7 +56,7 @@ public class TestCJKTokenizer extends BaseTokenStreamTestCase {
}
public void checkCJKTokenReusable(final Analyzer a, final String str, final TestToken[] out_tokens) throws IOException {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String terms[] = new String[out_tokens.length];
int startOffsets[] = new int[out_tokens.length];
int endOffsets[] = new int[out_tokens.length];
@@ -213,13 +212,13 @@ public class TestCJKTokenizer extends BaseTokenStreamTestCase {
}
public void testTokenStream() throws Exception {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "\u4e00\u4e01\u4e02",
new String[] { "\u4e00\u4e01", "\u4e01\u4e02"});
}
public void testReusableTokenStream() throws Exception {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String str = "\u3042\u3044\u3046\u3048\u304aabc\u304b\u304d\u304f\u3051\u3053";
TestToken[] out_tokens = {
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
index a176f63bd18..a342c86b90e 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
@@ -28,17 +28,11 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
static final File dataDir = new File(System.getProperty("dataDir", "./bin"));
static final File testFile = new File(dataDir, "org/apache/lucene/analysis/compound/da_UTF8.xml");
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- }
-
public void testHyphenationCompoundWordsDA() throws Exception {
String[] dict = { "læse", "hest" };
@@ -47,8 +41,8 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter
.getHyphenationTree(reader);
- HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"min veninde som er lidt af en læsehest")), hyphenator,
dict, CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
@@ -67,8 +61,8 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
.getHyphenationTree(reader);
// the word basket will not be added due to the longest match option
- HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"basketballkurv")), hyphenator, dict,
CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE, 40, true);
@@ -84,8 +78,8 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
"Pelar", "Glas", "Ögon", "Fodral", "Bas", "Fiol", "Makare", "Gesäll",
"Sko", "Vind", "Rute", "Torkare", "Blad" };
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(
"Bildörr Bilmotor Biltak Slagborr Hammarborr Pelarborr Glasögonfodral Basfiolsfodral Basfiolsfodralmakaregesäll Skomakare Vindrutetorkare Vindrutetorkarblad abba")),
dict);
@@ -113,8 +107,8 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
"Pelar", "Glas", "Ögon", "Fodral", "Bas", "Fiols", "Makare", "Gesäll",
"Sko", "Vind", "Rute", "Torkare", "Blad", "Fiolsfodral" };
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Basfiolsfodralmakaregesäll")),
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Basfiolsfodralmakaregesäll")),
dict, CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE, true);
@@ -129,9 +123,9 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
String[] dict = { "Rind", "Fleisch", "Draht", "Schere", "Gesetz",
"Aufgabe", "Überwachung" };
- Tokenizer wsTokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Rindfleischüberwachungsgesetz"));
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
wsTokenizer, dict,
CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
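The deleted setUp() override did nothing beyond delegating, so removing it changes no behavior: JUnit invokes the inherited setUp() whether or not a subclass restates it. The removable shape:

    class ExampleTest extends junit.framework.TestCase {
      // A no-op override: calls super and adds nothing, safe to delete.
      @Override
      protected void setUp() throws Exception {
        super.setUp();
      }
    }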
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
index 5c5bcd19e7b..35d52266e92 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
@@ -48,7 +48,7 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
}
public void testStopWord() throws Exception {
- assertAnalyzesTo(new CzechAnalyzer(Version.LUCENE_CURRENT), "Pokud mluvime o volnem",
+ assertAnalyzesTo(new CzechAnalyzer(TEST_VERSION_CURRENT), "Pokud mluvime o volnem",
new String[] { "mluvim", "voln" });
}
@@ -63,7 +63,7 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
}
public void testReusableTokenStream() throws Exception {
- Analyzer analyzer = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(analyzer, "Pokud mluvime o volnem", new String[] { "mluvim", "voln" });
assertAnalyzesToReuse(analyzer, "Česká Republika", new String[] { "česk", "republik" });
}
@@ -112,9 +112,9 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
}
public void testWithStemExclusionSet() throws IOException{
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("hole");
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(cz, "hole desek", new String[] {"hole", "desk"});
}
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java
index d75fa493ceb..6396c57eeb7 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Czech Stemmer.
@@ -38,7 +37,7 @@ public class TestCzechStemmer extends BaseTokenStreamTestCase {
* Test showing how masculine noun forms conflate
*/
public void testMasculineNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* animate ending with a hard consonant */
assertAnalyzesTo(cz, "pán", new String[] { "pán" });
@@ -106,7 +105,7 @@ public class TestCzechStemmer extends BaseTokenStreamTestCase {
* Test showing how feminine noun forms conflate
*/
public void testFeminineNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with hard consonant */
assertAnalyzesTo(cz, "kost", new String[] { "kost" });
@@ -150,7 +149,7 @@ public class TestCzechStemmer extends BaseTokenStreamTestCase {
* Test showing how neuter noun forms conflate
*/
public void testNeuterNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with o */
assertAnalyzesTo(cz, "město", new String[] { "měst" });
@@ -193,7 +192,7 @@ public class TestCzechStemmer extends BaseTokenStreamTestCase {
* Test showing how adjectival forms conflate
*/
public void testAdjectives() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with ý/á/é */
assertAnalyzesTo(cz, "mladý", new String[] { "mlad" });
@@ -221,7 +220,7 @@ public class TestCzechStemmer extends BaseTokenStreamTestCase {
* Test some possessive suffixes
*/
public void testPossessive() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(cz, "Karlův", new String[] { "karl" });
assertAnalyzesTo(cz, "jazykový", new String[] { "jazyk" });
}
@@ -230,7 +229,7 @@ public class TestCzechStemmer extends BaseTokenStreamTestCase {
* Test some exceptional rules, implemented as rewrites.
*/
public void testExceptions() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* rewrite of št -> sk */
assertAnalyzesTo(cz, "český", new String[] { "česk" });
@@ -270,16 +269,16 @@ public class TestCzechStemmer extends BaseTokenStreamTestCase {
* Test that very short words are not stemmed.
*/
public void testDontStem() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(cz, "e", new String[] { "e" });
assertAnalyzesTo(cz, "zi", new String[] { "zi" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("hole");
CzechStemFilter filter = new CzechStemFilter(new KeywordMarkerTokenFilter(
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hole desek")), set));
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hole desek")), set));
assertTokenStreamContents(filter, new String[] { "hole", "desk" });
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
index 2b555537d83..cf38a1786e8 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestDanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in the classpath */
public void testResourcesAvailable() {
- new DanishAnalyzer(Version.LUCENE_CURRENT);
+ new DanishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new DanishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DanishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "undersøg", "undersøg");
checkOneTermReuse(a, "undersøgelse", "undersøg");
@@ -46,7 +45,7 @@ public class TestDanishAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set exclusionSet = new HashSet();
exclusionSet.add("undersøgelse");
- Analyzer a = new DanishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new DanishAnalyzer(TEST_VERSION_CURRENT,
DanishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "undersøgelse", "undersøgelse");
checkOneTermReuse(a, "undersøg", "undersøg");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
index 7dc0ad428d9..f13d9bd6b14 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
@@ -29,38 +29,38 @@ import org.apache.lucene.util.Version;
public class TestGermanAnalyzer extends BaseTokenStreamTestCase {
public void testReusableTokenStream() throws Exception {
- Analyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "Tisch", "tisch");
checkOneTermReuse(a, "Tische", "tisch");
checkOneTermReuse(a, "Tischen", "tisch");
}
public void testExclusionTableBWCompat() throws IOException {
- GermanStemFilter filter = new GermanStemFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT,
+ GermanStemFilter filter = new GermanStemFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT,
new StringReader("Fischen Trinken")));
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
filter.setExclusionSet(set);
assertTokenStreamContents(filter, new String[] { "fischen", "trink" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
GermanStemFilter filter = new GermanStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Fischen Trinken")), set));
assertTokenStreamContents(filter, new String[] { "fischen", "trink" });
}
public void testWithKeywordAttributeAndExclusionTable() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
- CharArraySet set1 = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set1 = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set1.add("trinken");
set1.add("fischen");
GermanStemFilter filter = new GermanStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Fischen Trinken")), set));
filter.setExclusionSet(set1);
assertTokenStreamContents(filter, new String[] { "fischen", "trinken" });
@@ -71,7 +71,7 @@ public class TestGermanAnalyzer extends BaseTokenStreamTestCase {
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- GermanAnalyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ GermanAnalyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "tischen", "tisch");
a.setStemExclusionTable(new String[] { "tischen" });
checkOneTermReuse(a, "tischen", "tischen");
@@ -81,7 +81,7 @@ public class TestGermanAnalyzer extends BaseTokenStreamTestCase {
* these only pass with LUCENE_CURRENT, not if you use o.a.l.a.de.GermanStemmer
*/
public void testGermanSpecials() throws Exception {
- GermanAnalyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ GermanAnalyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
// a/o/u + e is equivalent to the umlaut form
checkOneTermReuse(a, "Schaltflächen", "schaltflach");
checkOneTermReuse(a, "Schaltflaechen", "schaltflach");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
index 453189982c9..0b0cb78d4ea 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
@@ -28,7 +28,6 @@ import org.apache.lucene.analysis.KeywordTokenizer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the German stemmer. The stemming algorithm is known to work less
@@ -40,7 +39,7 @@ public class TestGermanStemFilter extends BaseTokenStreamTestCase {
public void testStemming() throws Exception {
Tokenizer tokenizer = new KeywordTokenizer(new StringReader(""));
- TokenFilter filter = new GermanStemFilter(new LowerCaseFilter(Version.LUCENE_CURRENT, tokenizer));
+ TokenFilter filter = new GermanStemFilter(new LowerCaseFilter(TEST_VERSION_CURRENT, tokenizer));
// read test cases from external file:
File dataDir = new File(System.getProperty("dataDir", "./bin"));
File testFile = new File(dataDir, "org/apache/lucene/analysis/de/data.txt");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
index ec34d7e1697..6f87c1fa5e1 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
@@ -32,7 +32,7 @@ public class GreekAnalyzerTest extends BaseTokenStreamTestCase {
* @throws Exception in case an error occurs
*/
public void testAnalyzer() throws Exception {
- Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GreekAnalyzer(TEST_VERSION_CURRENT);
// Verify the correct analysis of capitals and small accented letters
assertAnalyzesTo(a, "\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[] { "\u03bc\u03b9\u03b1", "\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1", "\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1", "\u03c3\u03b5\u03b9\u03c1\u03b1", "\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
@@ -48,7 +48,7 @@ public class GreekAnalyzerTest extends BaseTokenStreamTestCase {
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GreekAnalyzer(TEST_VERSION_CURRENT);
// Verify the correct analysis of capitals and small accented letters
assertAnalyzesToReuse(a, "\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[] { "\u03bc\u03b9\u03b1", "\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1", "\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1", "\u03c3\u03b5\u03b9\u03c1\u03b1", "\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
index eb5ee3a9b85..e35acf4a79a 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestEnglishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new EnglishAnalyzer(Version.LUCENE_CURRENT);
+ new EnglishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new EnglishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new EnglishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "books", "book");
checkOneTermReuse(a, "book", "book");
@@ -46,7 +45,7 @@ public class TestEnglishAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("books");
- Analyzer a = new EnglishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new EnglishAnalyzer(TEST_VERSION_CURRENT,
EnglishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "books", "books");
checkOneTermReuse(a, "book", "book");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
index b1fa592f476..687573cd027 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSpanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new SpanishAnalyzer(Version.LUCENE_CURRENT);
+ new SpanishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new SpanishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SpanishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "chicana", "chican");
checkOneTermReuse(a, "chicano", "chican");
@@ -46,7 +45,7 @@ public class TestSpanishAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("chicano");
- Analyzer a = new SpanishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new SpanishAnalyzer(TEST_VERSION_CURRENT,
SpanishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "chicana", "chican");
checkOneTermReuse(a, "chicano", "chicano");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
index 34096b58c3c..452f3561af2 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.fa;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.util.Version;
/**
* Test the Persian Analyzer
@@ -31,7 +30,7 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
* This test fails with NPE when the stopwords file is missing in classpath
*/
public void testResourcesAvailable() {
- new PersianAnalyzer(Version.LUCENE_CURRENT);
+ new PersianAnalyzer(TEST_VERSION_CURRENT);
}
/**
@@ -42,7 +41,7 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
public void testBehaviorVerbs() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
// active present indicative
assertAnalyzesTo(a, "میخورد", new String[] { "خورد" });
// active preterite indicative
@@ -118,7 +117,7 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
public void testBehaviorVerbsDefective() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
// active present indicative
assertAnalyzesTo(a, "مي خورد", new String[] { "خورد" });
// active preterite indicative
@@ -189,7 +188,7 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
* nouns, removing the plural -ha.
*/
public void testBehaviorNouns() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "برگ ها", new String[] { "برگ" });
assertAnalyzesTo(a, "برگها", new String[] { "برگ" });
}
@@ -199,7 +198,7 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
* (lowercased, etc)
*/
public void testBehaviorNonPersian() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "English test.", new String[] { "english", "test" });
}
@@ -207,7 +206,7 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
* Basic test ensuring that reusableTokenStream works correctly.
*/
public void testReusableTokenStream() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "خورده مي شده بوده باشد", new String[] { "خورده" });
assertAnalyzesToReuse(a, "برگها", new String[] { "برگ" });
}
@@ -216,7 +215,7 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
* Test that custom stopwords work, and are not case-sensitive.
*/
public void testCustomStopwords() throws Exception {
- PersianAnalyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT, new String[] { "the", "and", "a" });
+ PersianAnalyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT, new String[] { "the", "and", "a" });
assertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java
index 324b70c24d4..08193aecf97 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java
@@ -22,7 +22,6 @@ import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.ar.ArabicLetterTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Persian Normalization Filter
@@ -55,7 +54,7 @@ public class TestPersianNormalizationFilter extends BaseTokenStreamTestCase {
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT,
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
PersianNormalizationFilter filter = new PersianNormalizationFilter(
tokenStream);
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
index e7ea3b39910..379b0257575 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestFinnishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new FinnishAnalyzer(Version.LUCENE_CURRENT);
+ new FinnishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new FinnishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new FinnishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "edeltäjiinsä", "edeltäj");
checkOneTermReuse(a, "edeltäjistään", "edeltäj");
@@ -46,7 +45,7 @@ public class TestFinnishAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("edeltäjistään");
- Analyzer a = new FinnishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new FinnishAnalyzer(TEST_VERSION_CURRENT,
FinnishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "edeltäjiinsä", "edeltäj");
checkOneTermReuse(a, "edeltäjistään", "edeltäjistään");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java
index c602d2739d1..d7b23c8069a 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java
@@ -29,7 +29,6 @@ import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
/**
*
@@ -38,19 +37,19 @@ public class TestElision extends BaseTokenStreamTestCase {
public void testElision() throws Exception {
String test = "Plop, juste pour voir l'embrouille avec O'brian. M'enfin.";
- Tokenizer tokenizer = new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(test));
- Set articles = new HashSet();
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(test));
+ Set<String> articles = new HashSet<String>();
articles.add("l");
articles.add("M");
- TokenFilter filter = new ElisionFilter(Version.LUCENE_CURRENT, tokenizer, articles);
- List tas = filtre(filter);
+ TokenFilter filter = new ElisionFilter(TEST_VERSION_CURRENT, tokenizer, articles);
+ List<String> tas = filter(filter);
assertEquals("embrouille", tas.get(4));
assertEquals("O'brian", tas.get(6));
assertEquals("enfin", tas.get(7));
}
- private List filtre(TokenFilter filter) throws IOException {
- List tas = new ArrayList();
+ private List<String> filter(TokenFilter filter) throws IOException {
+ List<String> tas = new ArrayList<String>();
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
while (filter.incrementToken()) {
tas.add(termAtt.term());
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
index 249ea57c69a..40bebc17829 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
@@ -32,7 +32,7 @@ import org.apache.lucene.util.Version;
public class TestFrenchAnalyzer extends BaseTokenStreamTestCase {
public void testAnalyzer() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(fa, "", new String[] {
});
@@ -204,7 +204,7 @@ public class TestFrenchAnalyzer extends BaseTokenStreamTestCase {
}
public void testReusableTokenStream() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
// stopwords
assertAnalyzesToReuse(
fa,
@@ -229,27 +229,27 @@ public class TestFrenchAnalyzer extends BaseTokenStreamTestCase {
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(fa, "habitable", new String[] { "habit" });
fa.setStemExclusionTable(new String[] { "habitable" });
assertAnalyzesToReuse(fa, "habitable", new String[] { "habitable" });
}
public void testExclusionTableViaCtor() throws Exception {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("habitable");
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT,
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT,
CharArraySet.EMPTY_SET, set);
assertAnalyzesToReuse(fa, "habitable chiste", new String[] { "habitable",
"chist" });
- fa = new FrenchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ fa = new FrenchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(fa, "habitable chiste", new String[] { "habitable",
"chist" });
}
public void testElision() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(fa, "voir l'embrouille", new String[] { "voir", "embrouill" });
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
index fef46d27645..dcebfc34400 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
@@ -5,7 +5,6 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -31,11 +30,11 @@ public class TestHindiAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new HindiAnalyzer(Version.LUCENE_CURRENT);
+ new HindiAnalyzer(TEST_VERSION_CURRENT);
}
public void testBasics() throws Exception {
- Analyzer a = new HindiAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new HindiAnalyzer(TEST_VERSION_CURRENT);
// two ways to write 'hindi' itself.
checkOneTermReuse(a, "हिन्दी", "हिंद");
checkOneTermReuse(a, "हिंदी", "हिंद");
@@ -44,7 +43,7 @@ public class TestHindiAnalyzer extends BaseTokenStreamTestCase {
public void testExclusionSet() throws Exception {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("हिंदी");
- Analyzer a = new HindiAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new HindiAnalyzer(TEST_VERSION_CURRENT,
HindiAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "हिंदी", "हिंदी");
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java
index 929087d9ff6..42c478163d8 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test HindiNormalizer
@@ -60,7 +59,7 @@ public class TestHindiNormalizer extends BaseTokenStreamTestCase {
check("आईऊॠॡऐऔीूॄॣैौ", "अइउऋऌएओिुृॢेो");
}
private void check(String input, String output) throws IOException {
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
TokenFilter tf = new HindiNormalizationFilter(tokenizer);
assertTokenStreamContents(tf, new String[] { output });
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiStemmer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiStemmer.java
index 1eaaf2e4f52..cce0015cf67 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiStemmer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiStemmer.java
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test HindiStemmer
@@ -82,7 +81,7 @@ public class TestHindiStemmer extends BaseTokenStreamTestCase {
}
private void check(String input, String output) throws IOException {
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
TokenFilter tf = new HindiStemFilter(tokenizer);
assertTokenStreamContents(tf, new String[] { output });
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java
index e3ce9987b12..e00289f33bd 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestHungarianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new HungarianAnalyzer(Version.LUCENE_CURRENT);
+ new HungarianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new HungarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new HungarianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "babakocsi", "babakocs");
checkOneTermReuse(a, "babakocsijáért", "babakocs");
@@ -46,7 +45,7 @@ public class TestHungarianAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("babakocsi");
- Analyzer a = new HungarianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new HungarianAnalyzer(TEST_VERSION_CURRENT,
HungarianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "babakocsi", "babakocsi");
checkOneTermReuse(a, "babakocsijáért", "babakocs");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicNormalizer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicNormalizer.java
index e97ad045782..b1ffd9b4b50 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicNormalizer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicNormalizer.java
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test IndicNormalizer
@@ -45,7 +44,7 @@ public class TestIndicNormalizer extends BaseTokenStreamTestCase {
}
private void check(String input, String output) throws IOException {
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
TokenFilter tf = new IndicNormalizationFilter(tokenizer);
assertTokenStreamContents(tf, new String[] { output });
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicTokenizer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicTokenizer.java
index 0bab19a6c49..9a2cd81a61e 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicTokenizer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicTokenizer.java
@@ -22,7 +22,6 @@ import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.util.Version;
/**
* Test IndicTokenizer
@@ -30,7 +29,7 @@ import org.apache.lucene.util.Version;
public class TestIndicTokenizer extends BaseTokenStreamTestCase {
/** Test tokenizing Indic vowels, signs, and punctuation */
public void testBasics() throws IOException {
- TokenStream ts = new IndicTokenizer(Version.LUCENE_CURRENT,
+ TokenStream ts = new IndicTokenizer(TEST_VERSION_CURRENT,
new StringReader("मुझे हिंदी का और अभ्यास करना होगा ।"));
assertTokenStreamContents(ts,
new String[] { "मुझे", "हिंदी", "का", "और", "अभ्यास", "करना", "होगा" });
@@ -38,7 +37,7 @@ public class TestIndicTokenizer extends BaseTokenStreamTestCase {
/** Test that words with format chars such as ZWJ are kept */
public void testFormat() throws Exception {
- TokenStream ts = new IndicTokenizer(Version.LUCENE_CURRENT,
+ TokenStream ts = new IndicTokenizer(TEST_VERSION_CURRENT,
new StringReader("शार्मा शार्मा"));
assertTokenStreamContents(ts, new String[] { "शार्मा", "शार्मा" });
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java
index c110dde9771..3348721298a 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestItalianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new ItalianAnalyzer(Version.LUCENE_CURRENT);
+ new ItalianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new ItalianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new ItalianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "abbandonata", "abbandon");
checkOneTermReuse(a, "abbandonati", "abbandon");
@@ -46,7 +45,7 @@ public class TestItalianAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("abbandonata");
- Analyzer a = new ItalianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new ItalianAnalyzer(TEST_VERSION_CURRENT,
ItalianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "abbandonata", "abbandonata");
checkOneTermReuse(a, "abbandonati", "abbandon");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
index fb2455438d3..a3464706916 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
@@ -24,7 +24,6 @@ import java.util.regex.Pattern;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.util.Version;
/**
* Verifies the behavior of PatternAnalyzer.
@@ -37,13 +36,13 @@ public class PatternAnalyzerTest extends BaseTokenStreamTestCase {
*/
public void testNonWordPattern() throws IOException {
// Split on non-letter pattern, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox", "the", "abcd", "dc" });
// split on non-letter pattern, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox", "abcd", "dc" });
@@ -55,13 +54,13 @@ public class PatternAnalyzerTest extends BaseTokenStreamTestCase {
*/
public void testWhitespacePattern() throws IOException {
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox,the", "abcd1234", "(56.78)", "dc." });
// Split on whitespace patterns, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." });
@@ -73,12 +72,12 @@ public class PatternAnalyzerTest extends BaseTokenStreamTestCase {
*/
public void testCustomPattern() throws IOException {
// Split on comma, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), false, null);
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, Pattern.compile(","), false, null);
check(a, "Here,Are,some,Comma,separated,words,", new String[] { "Here",
"Are", "some", "Comma", "separated", "words" });
// split on comma, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), true,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, Pattern.compile(","), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "Here,Are,some,Comma,separated,words,", new String[] { "here",
"some", "comma", "separated", "words" });
@@ -103,7 +102,7 @@ public class PatternAnalyzerTest extends BaseTokenStreamTestCase {
document.append(largeWord2);
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, document.toString(), new String[] { new String(largeWord),
new String(largeWord2) });
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
index 157a5304e2b..1fe55e37a0a 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.miscellaneous;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -31,7 +30,7 @@ public class TestPrefixAndSuffixAwareTokenFilter extends BaseTokenStreamTestCase
PrefixAndSuffixAwareTokenFilter ts = new PrefixAndSuffixAwareTokenFilter(
new SingleTokenTokenStream(createToken("^", 0, 0)),
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hello world")),
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hello world")),
new SingleTokenTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
index 51d1fa6d9df..b10fc739b8e 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.miscellaneous;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -42,7 +41,7 @@ public class TestPrefixAwareTokenFilter extends BaseTokenStreamTestCase {
// prefix and suffix using 2x prefix
ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(createToken("^", 0, 0)),
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hello world")));
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hello world")));
ts = new PrefixAwareTokenFilter(ts, new SingleTokenTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
index 77e122efce9..a72274c8663 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
@@ -10,7 +10,6 @@ import org.apache.lucene.analysis.KeywordTokenizer;
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -38,7 +37,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
dictionary.put("booked", "books");
Tokenizer tokenizer = new KeywordTokenizer(new StringReader("booked"));
TokenStream stream = new PorterStemFilter(
- new StemmerOverrideFilter(Version.LUCENE_CURRENT, tokenizer, dictionary));
+ new StemmerOverrideFilter(TEST_VERSION_CURRENT, tokenizer, dictionary));
assertTokenStreamContents(stream, new String[] { "books" });
}
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
index 481fe7a6208..346af2a353a 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.ngram;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -31,9 +30,9 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
private TokenStream input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
}
public void testInvalidInput() throws Exception {
@@ -92,13 +91,13 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
}
public void testSmallTokenInStream() throws Exception {
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abc de fgh"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abc de fgh"));
EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
assertTokenStreamContents(tokenizer, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
}
public void testReset() throws Exception {
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
tokenizer.reset(new StringReader("abcde"));
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
index cf2686b65cb..159df05953a 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
@@ -29,7 +29,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
private StringReader input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
input = new StringReader("abcde");
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
index 0a6fa47b5ae..ed7c1701e52 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.ngram;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -31,9 +30,9 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
private TokenStream input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
}
public void testInvalidInput() throws Exception {
@@ -81,13 +80,13 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
}
public void testSmallTokenInStream() throws Exception {
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abc de fgh"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abc de fgh"));
NGramTokenFilter filter = new NGramTokenFilter(input, 3, 3);
assertTokenStreamContents(filter, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
}
public void testReset() throws Exception {
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 1);
assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
tokenizer.reset(new StringReader("abcde"));
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
index 08289f80fc4..b41e3e4d1ca 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
@@ -29,7 +29,7 @@ public class NGramTokenizerTest extends BaseTokenStreamTestCase {
private StringReader input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
input = new StringReader("abcde");
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
index cceaa0c5f6d..6d5357c95bf 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
@@ -127,14 +127,14 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase {
}
public void testSnowballCorrectness() throws Exception {
- Analyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "opheffen", "opheff");
checkOneTermReuse(a, "opheffende", "opheff");
checkOneTermReuse(a, "opheffing", "opheff");
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichaamsziek", "lichaamsziek");
checkOneTermReuse(a, "lichamelijk", "licham");
checkOneTermReuse(a, "lichamelijke", "licham");
@@ -146,7 +146,7 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase {
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichamelijk", "licham");
a.setStemExclusionTable(new String[] { "lichamelijk" });
checkOneTermReuse(a, "lichamelijk", "lichamelijk");
@@ -157,10 +157,10 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase {
public void testExclusionTableViaCtor() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_30, 1, true);
set.add("lichamelijk");
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesToReuse(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
- a = new DutchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
}
@@ -170,7 +170,7 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase {
* when using reusable token streams.
*/
public void testStemDictionaryReuse() throws Exception {
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichamelijk", "licham");
a.setStemDictionary(customDictFile);
checkOneTermReuse(a, "lichamelijk", "somethingentirelydifferent");
@@ -196,7 +196,7 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase {
}
private void check(final String input, final String expected) throws Exception {
- checkOneTerm(new DutchAnalyzer(Version.LUCENE_CURRENT), input, expected);
+ checkOneTerm(new DutchAnalyzer(TEST_VERSION_CURRENT), input, expected);
}
}
\ No newline at end of file
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
index 5a75ea1b2e7..ebcb607f983 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestNorwegianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new NorwegianAnalyzer(Version.LUCENE_CURRENT);
+ new NorwegianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new NorwegianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "havnedistriktene", "havnedistrikt");
checkOneTermReuse(a, "havnedistrikter", "havnedistrikt");
@@ -46,7 +45,7 @@ public class TestNorwegianAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("havnedistriktene");
- Analyzer a = new NorwegianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT,
NorwegianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "havnedistriktene", "havnedistriktene");
checkOneTermReuse(a, "havnedistrikter", "havnedistrikt");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java
index 88131adb304..1e8970bbbfc 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java
@@ -22,21 +22,15 @@ import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
-
-/**
- *
- *
- **/
public class DelimitedPayloadTokenFilterTest extends LuceneTestCase {
public void testPayloads() throws Exception {
String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter
- (new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)),
+ (new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)),
DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
@@ -57,7 +51,7 @@ public class DelimitedPayloadTokenFilterTest extends LuceneTestCase {
String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter
- (new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)),
+ (new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)),
DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());
assertTermEquals("The", filter, null);
assertTermEquals("quick", filter, "JJ".getBytes("UTF-8"));
@@ -75,7 +69,7 @@ public class DelimitedPayloadTokenFilterTest extends LuceneTestCase {
public void testFloatEncoding() throws Exception {
String test = "The quick|1.0 red|2.0 fox|3.5 jumped|0.5 over the lazy|5 brown|99.3 dogs|83.7";
- DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)), '|', new FloatEncoder());
+ DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)), '|', new FloatEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
assertTermEquals("The", filter, termAtt, payAtt, null);
@@ -93,7 +87,7 @@ public class DelimitedPayloadTokenFilterTest extends LuceneTestCase {
public void testIntEncoding() throws Exception {
String test = "The quick|1 red|2 fox|3 jumped over the lazy|5 brown|99 dogs|83";
- DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)), '|', new IntegerEncoder());
+ DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)), '|', new IntegerEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
assertTermEquals("The", filter, termAtt, payAtt, null);
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
index 99b9a032d61..a0f479e6edc 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -38,7 +37,7 @@ public class NumericPayloadTokenFilterTest extends BaseTokenStreamTestCase {
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))), 3, "D");
+ NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))), 3, "D");
boolean seenDogs = false;
TermAttribute termAtt = nptf.getAttribute(TermAttribute.class);
TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
index decde6617bf..e503395e325 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
@@ -21,7 +21,6 @@ import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -36,7 +35,7 @@ public class TokenOffsetPayloadTokenFilterTest extends BaseTokenStreamTestCase {
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- TokenOffsetPayloadTokenFilter nptf = new TokenOffsetPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TokenOffsetPayloadTokenFilter nptf = new TokenOffsetPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
int count = 0;
PayloadAttribute payloadAtt = nptf.getAttribute(PayloadAttribute.class);
OffsetAttribute offsetAtt = nptf.getAttribute(OffsetAttribute.class);
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
index edcb1352cb4..b07bd72d79c 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -38,7 +37,7 @@ public class TypeAsPayloadTokenFilterTest extends BaseTokenStreamTestCase {
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- TypeAsPayloadTokenFilter nptf = new TypeAsPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))));
+ TypeAsPayloadTokenFilter nptf = new TypeAsPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))));
int count = 0;
TermAttribute termAtt = nptf.getAttribute(TermAttribute.class);
TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
@@ -48,7 +47,6 @@ public class TypeAsPayloadTokenFilterTest extends BaseTokenStreamTestCase {
assertTrue(typeAtt.type() + " is not null and it should be", typeAtt.type().equals(String.valueOf(Character.toUpperCase(termAtt.termBuffer()[0]))));
assertTrue("nextToken.getPayload() is null and it shouldn't be", payloadAtt.getPayload() != null);
String type = new String(payloadAtt.getPayload().getData(), "UTF-8");
- assertTrue("type is null and it shouldn't be", type != null);
assertTrue(type + " is not equal to " + typeAtt.type(), type.equals(typeAtt.type()) == true);
count++;
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
index 0762d3ae701..35befb76c8b 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestPortugueseAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new PortugueseAnalyzer(Version.LUCENE_CURRENT);
+ new PortugueseAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new PortugueseAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "quilométricas", "quilométr");
checkOneTermReuse(a, "quilométricos", "quilométr");
@@ -46,7 +45,7 @@ public class TestPortugueseAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("quilométricas");
- Analyzer a = new PortugueseAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT,
PortugueseAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "quilométricas", "quilométricas");
checkOneTermReuse(a, "quilométricos", "quilométr");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
index 53cfea93505..f6cad045a16 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
@@ -37,7 +37,6 @@ import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
String variedFieldValues[] = {"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"};
@@ -51,7 +50,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
protected void setUp() throws Exception {
super.setUp();
dir = new RAMDirectory();
- appAnalyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ appAnalyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, appAnalyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
int numDocs = 200;
for (int i = 0; i < numDocs; i++) {
@@ -64,7 +63,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
}
writer.close();
reader = IndexReader.open(dir, true);
- protectedAnalyzer = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, appAnalyzer);
+ protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer);
}
@Override
@@ -75,7 +74,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
//Helper method to query
private int search(Analyzer a, String queryString) throws IOException, ParseException {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "repetitiveField", a);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "repetitiveField", a);
Query q = qp.parse(queryString);
return new IndexSearcher(reader).search(q, null, 1000).totalHits;
}
@@ -157,14 +156,14 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
if (++invocationCount % 2 == 0)
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
else
- return new LetterTokenizer(Version.LUCENE_CURRENT, reader);
+ return new LetterTokenizer(TEST_VERSION_CURRENT, reader);
}
}
public void testWrappingNonReusableAnalyzer() throws Exception {
- QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, new NonreusableAnalyzer());
+ QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, new NonreusableAnalyzer());
a.addStopWords(reader, 10);
int numHits = search(a, "repetitiveField:boring");
assertTrue(numHits == 0);
@@ -173,7 +172,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
}
public void testTokenStream() throws Exception {
- QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
a.addStopWords(reader, 10);
TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring"));
TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
index 98f0d5c0e5a..fe1a3197bcc 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
@@ -27,9 +27,9 @@ import org.apache.lucene.util.Version;
public class TestReverseStringFilter extends BaseTokenStreamTestCase {
public void testFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("Do have a nice day")); // 1-4 length string
- ReverseStringFilter filter = new ReverseStringFilter(Version.LUCENE_CURRENT, stream);
+ ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream);
TermAttribute text = filter.getAttribute(TermAttribute.class);
assertTrue(filter.incrementToken());
assertEquals("oD", text.term());
@@ -45,9 +45,9 @@ public class TestReverseStringFilter extends BaseTokenStreamTestCase {
}
public void testFilterWithMark() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Do have a nice day")); // 1-4 length string
- ReverseStringFilter filter = new ReverseStringFilter(Version.LUCENE_CURRENT, stream, '\u0001');
+ ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream, '\u0001');
TermAttribute text = filter
.getAttribute(TermAttribute.class);
assertTrue(filter.incrementToken());
@@ -64,14 +64,14 @@ public class TestReverseStringFilter extends BaseTokenStreamTestCase {
}
public void testReverseString() throws Exception {
- assertEquals( "A", ReverseStringFilter.reverse( "A" ) );
- assertEquals( "BA", ReverseStringFilter.reverse( "AB" ) );
- assertEquals( "CBA", ReverseStringFilter.reverse( "ABC" ) );
+ assertEquals( "A", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "A" ) );
+ assertEquals( "BA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "AB" ) );
+ assertEquals( "CBA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "ABC" ) );
}
public void testReverseChar() throws Exception {
char[] buffer = { 'A', 'B', 'C', 'D', 'E', 'F' };
- ReverseStringFilter.reverse( buffer, 2, 3 );
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 2, 3 );
assertEquals( "ABEDCF", new String( buffer ) );
}
@@ -84,37 +84,37 @@ public class TestReverseStringFilter extends BaseTokenStreamTestCase {
public void testReverseSupplementary() throws Exception {
// supplementary at end
- assertEquals("𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "瀛愯䇹鍟艱𩬅"));
+ assertEquals("𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "瀛愯䇹鍟艱𩬅"));
// supplementary at end - 1
- assertEquals("a𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "瀛愯䇹鍟艱𩬅a"));
+ assertEquals("a𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "瀛愯䇹鍟艱𩬅a"));
// supplementary at start
- assertEquals("fedcba𩬅", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "𩬅abcdef"));
+ assertEquals("fedcba𩬅", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "𩬅abcdef"));
// supplementary at start + 1
- assertEquals("fedcba𩬅z", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "z𩬅abcdef"));
+ assertEquals("fedcba𩬅z", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "z𩬅abcdef"));
// supplementary medial
- assertEquals("gfe𩬅dcba", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "abcd𩬅efg"));
+ assertEquals("gfe𩬅dcba", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "abcd𩬅efg"));
}
public void testReverseSupplementaryChar() throws Exception {
// supplementary at end
char[] buffer = "abc瀛愯䇹鍟艱𩬅".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abc𩬅艱鍟䇹愯瀛", new String(buffer));
// supplementary at end - 1
buffer = "abc瀛愯䇹鍟艱𩬅d".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 8);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
assertEquals("abcd𩬅艱鍟䇹愯瀛", new String(buffer));
// supplementary at start
buffer = "abc𩬅瀛愯䇹鍟艱".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abc艱鍟䇹愯瀛𩬅", new String(buffer));
// supplementary at start + 1
buffer = "abcd𩬅瀛愯䇹鍟艱".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 8);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
assertEquals("abc艱鍟䇹愯瀛𩬅d", new String(buffer));
// supplementary medial
buffer = "abc瀛愯𩬅def".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abcfed𩬅愯瀛", new String(buffer));
}
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
index 77c2b5fc5da..44e3424499f 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestRomanianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new RomanianAnalyzer(Version.LUCENE_CURRENT);
+ new RomanianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new RomanianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "absenţa", "absenţ");
checkOneTermReuse(a, "absenţi", "absenţ");
@@ -46,7 +45,7 @@ public class TestRomanianAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("absenţa");
- Analyzer a = new RomanianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT,
RomanianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "absenţa", "absenţa");
checkOneTermReuse(a, "absenţi", "absenţ");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
index 9c6e6063b90..1e529842a3d 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
@@ -44,8 +44,7 @@ public class TestRussianAnalyzer extends BaseTokenStreamTestCase
private File dataDir;
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
super.setUp();
dataDir = new File(System.getProperty("dataDir", "./bin"));
}
@@ -71,7 +70,7 @@ public class TestRussianAnalyzer extends BaseTokenStreamTestCase
TokenStream in = ra.tokenStream("all", inWords);
RussianLetterTokenizer sample =
- new RussianLetterTokenizer(Version.LUCENE_CURRENT,
+ new RussianLetterTokenizer(TEST_VERSION_CURRENT,
sampleUnicode);
TermAttribute text = in.getAttribute(TermAttribute.class);
@@ -98,7 +97,7 @@ public class TestRussianAnalyzer extends BaseTokenStreamTestCase
public void testDigitsInRussianCharset()
{
Reader reader = new StringReader("text 1000");
- RussianAnalyzer ra = new RussianAnalyzer(Version.LUCENE_CURRENT);
+ RussianAnalyzer ra = new RussianAnalyzer(TEST_VERSION_CURRENT);
TokenStream stream = ra.tokenStream("", reader);
TermAttribute termText = stream.getAttribute(TermAttribute.class);
@@ -126,7 +125,7 @@ public class TestRussianAnalyzer extends BaseTokenStreamTestCase
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new RussianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представлен" });
assertAnalyzesToReuse(a, "Но знание это хранилось в тайне",
@@ -135,9 +134,9 @@ public class TestRussianAnalyzer extends BaseTokenStreamTestCase
public void testWithStemExclusionSet() throws Exception {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("представление");
- Analyzer a = new RussianAnalyzer(Version.LUCENE_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
+ Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
assertAnalyzesToReuse(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представление" });
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java
index 55c4e7ef79d..c61f45a8a16 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java
@@ -30,8 +30,8 @@ import java.util.ArrayList;
@Deprecated
public class TestRussianStem extends LuceneTestCase
{
- private ArrayList words = new ArrayList();
- private ArrayList stems = new ArrayList();
+ private ArrayList<String> words = new ArrayList<String>();
+ private ArrayList<String> stems = new ArrayList<String>();
public TestRussianStem(String name)
{
@@ -42,8 +42,7 @@ public class TestRussianStem extends LuceneTestCase
* @see TestCase#setUp()
*/
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
super.setUp();
//System.out.println(new java.util.Date());
String str;
@@ -75,15 +74,6 @@ public class TestRussianStem extends LuceneTestCase
inStems.close();
}
- /**
- * @see TestCase#tearDown()
- */
- @Override
- protected void tearDown() throws Exception
- {
- super.tearDown();
- }
-
public void testStem()
{
for (int i = 0; i < words.size(); i++)
@@ -91,7 +81,7 @@ public class TestRussianStem extends LuceneTestCase
//if ( (i % 100) == 0 ) System.err.println(i);
String realStem =
RussianStemmer.stemWord(
- (String) words.get(i));
+ words.get(i));
assertEquals("unicode", stems.get(i), realStem);
}
}
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
index d8a3c57e3f0..234f9b7f7f9 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
@@ -42,7 +42,6 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
/**
* A test class for ShingleAnalyzerWrapper as regards queries and scoring.
@@ -86,7 +85,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
protected ScoreDoc[] queryParsingTest(Analyzer analyzer, String qs) throws Exception {
searcher = setUpSearcher(analyzer);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", analyzer);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "content", analyzer);
Query q = qp.parse(qs);
@@ -106,7 +105,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
*/
public void testShingleAnalyzerWrapperQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"test sentence");
int[] ranks = new int[] { 1, 2, 0 };
compareRanks(hits, ranks);
@@ -117,7 +116,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
*/
public void testShingleAnalyzerWrapperPhraseQueryParsingFails() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"\"this sentence\"");
int[] ranks = new int[] { 0 };
compareRanks(hits, ranks);
@@ -128,7 +127,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
*/
public void testShingleAnalyzerWrapperPhraseQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"\"test sentence\"");
int[] ranks = new int[] { 1 };
compareRanks(hits, ranks);
@@ -139,7 +138,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
*/
public void testShingleAnalyzerWrapperRequiredQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"+test +sentence");
int[] ranks = new int[] { 1, 2 };
compareRanks(hits, ranks);
@@ -149,7 +148,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
* This shows how to construct a phrase query containing shingles.
*/
public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
- Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
searcher = setUpSearcher(analyzer);
PhraseQuery q = new PhraseQuery();
@@ -178,7 +177,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
* in the right order and adjacent to each other.
*/
public void testShingleAnalyzerWrapperBooleanQuery() throws Exception {
- Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
searcher = setUpSearcher(analyzer);
BooleanQuery q = new BooleanQuery();
@@ -200,7 +199,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer a = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
assertAnalyzesToReuse(a, "please divide into shingles",
new String[] { "please", "please divide", "divide", "divide into", "into", "into shingles", "shingles" },
new int[] { 0, 0, 7, 7, 14, 14, 19 },
@@ -222,9 +221,9 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
if (++invocationCount % 2 == 0)
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
else
- return new LetterTokenizer(Version.LUCENE_CURRENT, reader);
+ return new LetterTokenizer(TEST_VERSION_CURRENT, reader);
}
}
@@ -249,7 +248,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
public void testNonDefaultMinShingleSize() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(), 3, 4);
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 3, 4);
assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles",
new String[] { "please", "please divide this", "please divide this sentence",
"divide", "divide this sentence", "divide this sentence into",
@@ -273,7 +272,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
public void testNonDefaultMinAndSameMaxShingleSize() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(), 3, 3);
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 3, 3);
assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles",
new String[] { "please", "please divide this",
"divide", "divide this sentence",
@@ -297,7 +296,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
public void testNoTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator("");
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
@@ -319,7 +318,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
public void testNullTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator(null);
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
@@ -340,7 +339,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
}
public void testAltTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator("");
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
index 982fc0afc82..38d5c074aa8 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
@@ -26,7 +26,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.util.Version;
public class ShingleFilterTest extends BaseTokenStreamTestCase {
@@ -836,7 +835,7 @@ public class ShingleFilterTest extends BaseTokenStreamTestCase {
public void testReset() throws Exception {
- Tokenizer wsTokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("please divide this sentence"));
+ Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("please divide this sentence"));
TokenStream filter = new ShingleFilter(wsTokenizer, 2);
assertTokenStreamContents(filter,
new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"},
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java
index 7f7ee15ec1a..3a8aa3d697a 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java
@@ -31,7 +31,6 @@ import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column;
import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.util.Version;
public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
@@ -41,11 +40,11 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
public void testIterator() throws IOException {
- WhitespaceTokenizer wst = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("one two three four five"));
+ WhitespaceTokenizer wst = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("one two three four five"));
ShingleMatrixFilter smf = new ShingleMatrixFilter(wst, 2, 2, '_', false, new ShingleMatrixFilter.OneDimensionalNonWeightedTokenSettingsCodec());
int i;
- for(i=0; smf.incrementToken(); i++);
+ for(i=0; smf.incrementToken(); i++) {}
assertEquals(4, i);
// call next once more. this should return false again rather than throwing an exception (LUCENE-1939)
@@ -65,11 +64,11 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
assertFalse(ts.incrementToken());
TokenListStream tls;
- LinkedList tokens;
+ LinkedList<Token> tokens;
// test a plain old token stream with synonyms translated to rows.
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(createToken("please", 0, 6));
tokens.add(createToken("divide", 7, 13));
tokens.add(createToken("this", 14, 18));
@@ -101,11 +100,11 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
TokenStream ts;
TokenStream tls;
- LinkedList tokens;
+ LinkedList<Token> tokens;
// test a plain old token stream with synonyms translated to rows.
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 0, 4));
tokens.add(tokenFactory("greetings", 0, 0, 4));
tokens.add(tokenFactory("world", 1, 5, 10));
@@ -145,7 +144,7 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
ShingleMatrixFilter.defaultSettingsCodec = new ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec();
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn));
tokens.add(tokenFactory("greetings", 0, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow));
tokens.add(tokenFactory("world", 1, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newColumn));
@@ -286,7 +285,7 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
//
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn));
tokens.add(tokenFactory("greetings", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow));
tokens.add(tokenFactory("and", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.sameRow));
@@ -413,11 +412,6 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
}
- private Token tokenFactory(String text, int startOffset, int endOffset) {
- return tokenFactory(text, 1, 1f, startOffset, endOffset);
- }
-
-
private Token tokenFactory(String text, int posIncr, int startOffset, int endOffset) {
Token token = new Token(startOffset, endOffset);
token.setTermBuffer(text);
@@ -430,10 +424,6 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
return tokenFactory(text, posIncr, 1f, 0, 0);
}
- private Token tokenFactory(String text, int posIncr, float weight) {
- return tokenFactory(text, posIncr, weight, 0, 0);
- }
-
private Token tokenFactory(String text, int posIncr, float weight, int startOffset, int endOffset) {
Token token = new Token(startOffset, endOffset);
token.setTermBuffer(text);
@@ -460,17 +450,6 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
assertEquals(text, termAtt.term());
}
- private void assertNext(TokenStream ts, String text, int positionIncrement, float boost) throws IOException {
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
- PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
- PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
-
- assertTrue(ts.incrementToken());
- assertEquals(text, termAtt.term());
- assertEquals(positionIncrement, posIncrAtt.getPositionIncrement());
- assertEquals(boost, payloadAtt.getPayload() == null ? 1f : PayloadHelper.decodeFloat(payloadAtt.getPayload().getData()), 0);
- }
-
private void assertNext(TokenStream ts, String text, int positionIncrement, float boost, int startOffset, int endOffset) throws IOException {
TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
@@ -505,7 +484,7 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
public static class TokenListStream extends TokenStream {
- private Collection tokens;
+ private Collection<Token> tokens;
TermAttribute termAtt;
PositionIncrementAttribute posIncrAtt;
PayloadAttribute payloadAtt;
@@ -513,7 +492,7 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
TypeAttribute typeAtt;
FlagsAttribute flagsAtt;
- public TokenListStream(Collection tokens) {
+ public TokenListStream(Collection<Token> tokens) {
this.tokens = tokens;
termAtt = addAttribute(TermAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
@@ -523,7 +502,7 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
flagsAtt = addAttribute(FlagsAttribute.class);
}
- private Iterator iterator;
+ private Iterator<Token> iterator;
@Override
public boolean incrementToken() throws IOException {
@@ -533,7 +512,7 @@ public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
if (!iterator.hasNext()) {
return false;
}
- Token prototype = (Token) iterator.next();
+ Token prototype = iterator.next();
clearAttributes();
termAtt.setTermBuffer(prototype.termBuffer(), 0, prototype.termLength());
posIncrAtt.setPositionIncrement(prototype.getPositionIncrement());
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java
index 065598f8904..b5c9e8e74ef 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java
@@ -25,7 +25,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TeeSinkTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
-import org.apache.lucene.util.Version;
public class DateRecognizerSinkTokenizerTest extends BaseTokenStreamTestCase {
@@ -37,7 +36,7 @@ public class DateRecognizerSinkTokenizerTest extends BaseTokenStreamTestCase {
public void test() throws IOException {
DateRecognizerSinkFilter sinkFilter = new DateRecognizerSinkFilter(new SimpleDateFormat("MM/dd/yyyy", Locale.US));
String test = "The quick red fox jumped over the lazy brown dogs on 7/11/2006 The dogs finally reacted on 7/12/2006";
- TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
SinkTokenStream sink = tee.newSinkTokenStream(sinkFilter);
int count = 0;
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java
index d22ad44ac5f..d2b3b72938e 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java
@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TeeSinkTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
-import org.apache.lucene.util.Version;
public class TokenRangeSinkTokenizerTest extends BaseTokenStreamTestCase {
@@ -35,7 +34,7 @@ public class TokenRangeSinkTokenizerTest extends BaseTokenStreamTestCase {
public void test() throws IOException {
TokenRangeSinkFilter sinkFilter = new TokenRangeSinkFilter(2, 4);
String test = "The quick red fox jumped over the lazy brown dogs";
- TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
SinkTokenStream rangeToks = tee.newSinkTokenStream(sinkFilter);
int count = 0;
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java
index 1e14e8f5655..cf7941e0a0e 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java
@@ -27,11 +27,9 @@ import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
public class TokenTypeSinkTokenizerTest extends BaseTokenStreamTestCase {
-
public TokenTypeSinkTokenizerTest(String s) {
super(s);
}
@@ -40,7 +38,7 @@ public class TokenTypeSinkTokenizerTest extends BaseTokenStreamTestCase {
TokenTypeSinkFilter sinkFilter = new TokenTypeSinkFilter("D");
String test = "The quick red fox jumped over the lazy brown dogs";
- TeeSinkTokenFilter ttf = new TeeSinkTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))));
+ TeeSinkTokenFilter ttf = new TeeSinkTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))));
SinkTokenStream sink = ttf.newSinkTokenStream(sinkFilter);
boolean seenDogs = false;
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
index d5c5f868c64..4b456633e56 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
@@ -33,13 +33,13 @@ import org.apache.lucene.util.Version;
public class TestSnowball extends BaseTokenStreamTestCase {
public void testEnglish() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesTo(a, "he abhorred accents",
new String[]{"he", "abhor", "accent"});
}
public void testStopwords() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English",
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English",
StandardAnalyzer.STOP_WORDS_SET);
assertAnalyzesTo(a, "the quick brown fox jumped",
new String[]{"quick", "brown", "fox", "jump"});
@@ -50,7 +50,7 @@ public class TestSnowball extends BaseTokenStreamTestCase {
* we lowercase I correct for non-Turkish languages in either case.
*/
public void testEnglishLowerCase() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesTo(a, "cryogenic", new String[] { "cryogen" });
assertAnalyzesTo(a, "CRYOGENIC", new String[] { "cryogen" });
@@ -63,7 +63,7 @@ public class TestSnowball extends BaseTokenStreamTestCase {
* Test turkish lowercasing
*/
public void testTurkish() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "Turkish");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "Turkish");
assertAnalyzesTo(a, "ağacı", new String[] { "ağaç" });
assertAnalyzesTo(a, "AĞACI", new String[] { "ağaç" });
@@ -84,7 +84,7 @@ public class TestSnowball extends BaseTokenStreamTestCase {
public void testReusableTokenStream() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesToReuse(a, "he abhorred accents",
new String[]{"he", "abhor", "accent"});
assertAnalyzesToReuse(a, "she abhorred him",
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
index ac9e317a7a1..d64ad1dbc6c 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSwedishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new SwedishAnalyzer(Version.LUCENE_CURRENT);
+ new SwedishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new SwedishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "jaktkarlarne", "jaktkarl");
checkOneTermReuse(a, "jaktkarlens", "jaktkarl");
@@ -46,7 +45,7 @@ public class TestSwedishAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("jaktkarlarne");
- Analyzer a = new SwedishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT,
SwedishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "jaktkarlarne", "jaktkarlarne");
checkOneTermReuse(a, "jaktkarlens", "jaktkarl");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
index ba532d670de..35458a72996 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.th;
*/
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Test case for ThaiAnalyzer, modified from TestFrenchAnalyzer
@@ -32,7 +31,7 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase {
* testcase for offsets
*/
public void testOffsets() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_CURRENT), "เดอะนิวยอร์กไทมส์",
+ assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "เดอะนิวยอร์กไทมส์",
new String[] { "เด", "อะนิว", "ยอ", "ร์ก", "ไทมส์"},
new int[] { 0, 2, 7, 9, 12 },
new int[] { 2, 7, 9, 12, 17});
@@ -50,7 +49,7 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase {
* Instead, allow the definition of alphanum to include relevant categories like nonspacing marks!
*/
public void testBuggyTokenType() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_CURRENT), "เดอะนิวยอร์กไทมส์ ๑๒๓",
+ assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "เดอะนิวยอร์กไทมส์ ๑๒๓",
new String[] { "เด", "อะนิว", "ยอ", "ร์ก", "ไทมส์", "๑๒๓" },
new String[] { "", "", "", "", "", "" });
}
@@ -64,7 +63,7 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase {
*/
public void testAnalyzer() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "", new String[] {});
@@ -89,7 +88,7 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase {
* Test that position increments are adjusted correctly for stopwords.
*/
public void testPositionIncrements() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "ประโยคว่า the ประโยคว่า",
new String[] { "ประโยค", "ว่า", "ประโยค", "ว่า" },
@@ -106,7 +105,7 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase {
}
public void testReusableTokenStream() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(analyzer, "", new String[] {});
assertAnalyzesToReuse(
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
index 2f7b590fcd9..cf8fed9a451 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
@@ -23,18 +23,17 @@ import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestTurkishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new TurkishAnalyzer(Version.LUCENE_CURRENT);
+ new TurkishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new TurkishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "ağacı", "ağaç");
checkOneTermReuse(a, "ağaç", "ağaç");
@@ -46,7 +45,7 @@ public class TestTurkishAnalyzer extends BaseTokenStreamTestCase {
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("ağacı");
- Analyzer a = new TurkishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT,
TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "ağacı", "ağacı");
checkOneTermReuse(a, "ağaç", "ağaç");
diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java
index 19a27f0e4ac..c2d1b1ddd0f 100644
--- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java
+++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java
@@ -22,7 +22,6 @@ import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Turkish lowercase filter.
@@ -33,7 +32,7 @@ public class TestTurkishLowerCaseFilter extends BaseTokenStreamTestCase {
* Test composed forms
*/
public void testTurkishLowerCaseFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0130STANBUL \u0130ZM\u0130R ISPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"istanbul", "izmir",
@@ -44,7 +43,7 @@ public class TestTurkishLowerCaseFilter extends BaseTokenStreamTestCase {
* Test decomposed forms
*/
public void testDecomposed() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0049\u0307STANBUL \u0049\u0307ZM\u0049\u0307R ISPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"istanbul", "izmir",
@@ -57,7 +56,7 @@ public class TestTurkishLowerCaseFilter extends BaseTokenStreamTestCase {
* to U+0130 + U+0316, and is lowercased the same way.
*/
public void testDecomposed2() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0049\u0316\u0307STANBUL \u0049\u0307ZM\u0049\u0307R I\u0316SPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"i\u0316stanbul", "izmir",
diff --git a/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java b/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
index d9ecdd2d716..8efba8a5958 100644
--- a/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
+++ b/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
@@ -20,8 +20,6 @@ package org.apache.lucene.ant;
import java.io.File;
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
@@ -31,13 +29,13 @@ import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.FSDirectory;
import org.apache.tools.ant.Project;
import org.apache.tools.ant.types.FileSet;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Test cases for index task
*
*/
-public class IndexTaskTest extends TestCase {
+public class IndexTaskTest extends LuceneTestCase {
private final static String docHandler =
"org.apache.lucene.ant.FileExtensionDocumentHandler";
@@ -55,7 +53,8 @@ public class IndexTaskTest extends TestCase {
*@exception IOException Description of Exception
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
+ super.setUp();
Project project = new Project();
IndexTask task = new IndexTask();
@@ -71,12 +70,12 @@ public class IndexTaskTest extends TestCase {
dir = FSDirectory.open(indexDir);
searcher = new IndexSearcher(dir, true);
- analyzer = new StopAnalyzer(Version.LUCENE_CURRENT);
+ analyzer = new StopAnalyzer(TEST_VERSION_CURRENT);
}
public void testSearch() throws Exception {
- Query query = new QueryParser(Version.LUCENE_CURRENT, "contents",analyzer).parse("test");
+ Query query = new QueryParser(TEST_VERSION_CURRENT, "contents",analyzer).parse("test");
int numHits = searcher.search(query, null, 1000).totalHits;
@@ -88,9 +87,10 @@ public class IndexTaskTest extends TestCase {
* TODO: remove indexDir?
*/
@Override
- public void tearDown() throws IOException {
+ protected void tearDown() throws Exception {
searcher.close();
dir.close();
+ super.tearDown();
}
}
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java
index d2292655483..c53ff8eb3bc 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java
@@ -13,10 +13,12 @@ import java.util.Date;
public class LongToEnglishContentSource extends ContentSource{
private long counter = Long.MIN_VALUE + 10;
+ @Override
public void close() throws IOException {
}
//TODO: reduce/clean up synchronization
+ @Override
public synchronized DocData getNextDocData(DocData docData) throws NoMoreDataException, IOException {
docData.clear();
docData.setBody(English.longToEnglish(counter));
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
index af6ec5bf6b6..4a409c6532a 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
@@ -30,7 +30,7 @@ public interface QueryMaker {
/**
* Create the next query, of the given size.
* @param size the size of the query - number of terms, etc.
- * @exception if cannot make the query, or if size>0 was specified but this feature is not supported.
+ * @exception Exception if cannot make the query, or if size>0 was specified but this feature is not supported.
*/
public Query makeQuery (int size) throws Exception;
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java
index b835295d02e..d2f8b2bbd16 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java
@@ -29,8 +29,6 @@ import org.apache.lucene.benchmark.byTask.utils.Config;
*/
public class Points {
- private Config config;
-
// stat points ordered by their start time.
// for now we collect points as TaskStats objects.
// later might optimize to collect only native data.
@@ -44,7 +42,6 @@ public class Points {
* Create a Points statistics object.
*/
public Points (Config config) {
- this.config = config;
}
/**
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java
index 8e1555d4fb3..494f19cc9da 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java
@@ -211,7 +211,7 @@ public class TaskStats implements Cloneable {
public Object clone() throws CloneNotSupportedException {
TaskStats c = (TaskStats) super.clone();
if (c.countsByTime != null) {
- c.countsByTime = (int[]) c.countsByTime.clone();
+ c.countsByTime = c.countsByTime.clone();
}
return c;
}
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java
index 48af64728c7..c31dd5d4cc8 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java
@@ -70,7 +70,7 @@ public class NearRealtimeReaderTask extends PerfTask {
// Parent sequence sets stopNow
reopenCount = 0;
while(!stopNow) {
- long waitForMsec = (long) (pauseMSec - (System.currentTimeMillis() - t));
+ long waitForMsec = (pauseMSec - (System.currentTimeMillis() - t));
if (waitForMsec > 0) {
Thread.sleep(waitForMsec);
//System.out.println("NRT wait: " + waitForMsec + " msec");
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewShingleAnalyzerTask.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewShingleAnalyzerTask.java
index 92b9a77d271..27b805c41a7 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewShingleAnalyzerTask.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewShingleAnalyzerTask.java
@@ -18,12 +18,10 @@ package org.apache.lucene.benchmark.byTask.tasks;
*/
import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
import java.util.StringTokenizer;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.shingle.ShingleAnalyzerWrapper;
-import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.util.Version;
@@ -39,8 +37,6 @@ import org.apache.lucene.util.Version;
public class NewShingleAnalyzerTask extends PerfTask {
private String analyzerClassName = "standard.StandardAnalyzer";
- private static final String shingleAnalyzerClassName
- = "org.apache.lucene.analysis.shingle.ShingleAnalyzerWrapper";
private int maxShingleSize = 2;
private boolean outputUnigrams = true;
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
index 5aed1eabae1..175c5d1b620 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
@@ -140,6 +140,6 @@ public class ReadTokensTask extends PerfTask {
}
}
@Override
- public void close() {};
+ public void close() {}
}
}
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java
index 4bf4312ef4d..763b9b8352a 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java
@@ -100,7 +100,6 @@ public class Config {
/**
* Create config without algorithm - useful for a programmatic perf test.
* @param props - configuration properties.
- * @throws IOException
*/
public Config (Properties props) {
this.props = props;
@@ -109,6 +108,7 @@ public class Config {
}
}
+ @SuppressWarnings("unchecked")
private void printProps() {
System.out.println("------------> config properties:");
List propKeys = new ArrayList(props.keySet());
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/stats/TimeData.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/stats/TimeData.java
index f3a843ff06e..26b00ad10b6 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/stats/TimeData.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/stats/TimeData.java
@@ -34,7 +34,7 @@ public class TimeData {
/** Total memory at the end of measurement interval. */
public long totalMem = 0L;
- public TimeData() {};
+ public TimeData() {}
public TimeData(String name) {
this.name = name;
@@ -78,7 +78,7 @@ public class TimeData {
/** Get rate of processing, defined as number of processed records per second. */
public double getRate() {
- double rps = (double) count * 1000.0 / (double) (elapsed>0 ? elapsed : 1); // assume at least 1ms for any countable op
+ double rps = count * 1000.0 / (elapsed > 0 ? elapsed : 1); // assume at least 1ms for any countable op
return rps;
}
diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java
index b53e096d364..4a14ea9f8d8 100644
--- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java
+++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java
@@ -94,14 +94,12 @@ public class ExtractReuters
StringBuffer outBuffer = new StringBuffer(1024);
String line = null;
- int index = -1;
int docNumber = 0;
while ((line = reader.readLine()) != null)
{
//when we see a closing reuters tag, flush the file
- if ((index = line.indexOf(" fragInfos = new ArrayList();
/**
@@ -38,7 +37,6 @@ public class FieldFragList {
* @param fragCharSize the length (number of chars) of a fragment
*/
public FieldFragList( int fragCharSize ){
- this.fragCharSize = fragCharSize;
}
/**
diff --git a/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index a45fcc9c164..289cfe154cd 100644
--- a/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -64,7 +64,7 @@ public class FieldTermStack {
writer.close();
IndexReader reader = IndexReader.open( dir, true );
- FieldTermStack ftl = new FieldTermStack( reader, 0, "f", fieldQuery );
+ new FieldTermStack( reader, 0, "f", fieldQuery );
reader.close();
}
diff --git a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
index 979967dcc9b..528c29dd3f0 100644
--- a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
+++ b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
@@ -21,11 +21,8 @@ import java.io.IOException;
import java.io.Reader;
import java.util.Collection;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.KeywordAnalyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -47,9 +44,9 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public abstract class AbstractTestCase extends TestCase {
+public abstract class AbstractTestCase extends LuceneTestCase {
protected final String F = "f";
protected final String F1 = "f1";
@@ -87,11 +84,12 @@ public abstract class AbstractTestCase extends TestCase {
@Override
protected void setUp() throws Exception {
- analyzerW = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ super.setUp();
+ analyzerW = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
analyzerB = new BigramAnalyzer();
analyzerK = new KeywordAnalyzer();
- paW = new QueryParser(Version.LUCENE_CURRENT, F, analyzerW );
- paB = new QueryParser(Version.LUCENE_CURRENT, F, analyzerB );
+ paW = new QueryParser(TEST_VERSION_CURRENT, F, analyzerW );
+ paB = new QueryParser(TEST_VERSION_CURRENT, F, analyzerB );
dir = new RAMDirectory();
}
@@ -101,6 +99,7 @@ public abstract class AbstractTestCase extends TestCase {
reader.close();
reader = null;
}
+ super.tearDown();
}
protected Query tq( String text ){
@@ -282,7 +281,7 @@ public abstract class AbstractTestCase extends TestCase {
}
charBufferIndex = 0;
}
- int c = (int)charBuffer[charBufferIndex++];
+ int c = charBuffer[charBufferIndex++];
nextStartOffset++;
return c;
}
@@ -291,11 +290,13 @@ public abstract class AbstractTestCase extends TestCase {
return delimiters.indexOf( c ) >= 0;
}
+ @Override
public void reset( Reader input ) throws IOException {
super.reset( input );
reset();
}
+ @Override
public void reset() throws IOException {
startTerm = 0;
nextStartOffset = 0;
diff --git a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
index 0fe683c20ab..ac0311afeba 100644
--- a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
+++ b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
@@ -17,7 +17,6 @@ package org.apache.lucene.search.vectorhighlight;
* limitations under the License.
*/
-import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
diff --git a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
index b9717c22003..5113b19e952 100644
--- a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
+++ b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
@@ -48,12 +48,10 @@ import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.OpenBitSet;
-import org.apache.lucene.util.Version;
-import junit.framework.TestCase;
-
-public class HighlighterPhraseTest extends TestCase {
+public class HighlighterPhraseTest extends LuceneTestCase {
private static final String FIELD = "text";
public void testConcurrentPhrase() throws CorruptIndexException,
@@ -61,7 +59,7 @@ public class HighlighterPhraseTest extends TestCase {
final String TEXT = "the fox jumped";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@@ -104,7 +102,7 @@ public class HighlighterPhraseTest extends TestCase {
final String TEXT = "the fox jumped";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@@ -125,19 +123,23 @@ public class HighlighterPhraseTest extends TestCase {
indexSearcher.search(phraseQuery, new Collector() {
private int baseDoc;
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
+ @Override
public void collect(int i) throws IOException {
bitset.set(this.baseDoc + i);
}
+ @Override
public void setNextReader(IndexReader indexreader, int i)
throws IOException {
this.baseDoc = i;
}
+ @Override
public void setScorer(org.apache.lucene.search.Scorer scorer)
throws IOException {
// Do Nothing
@@ -169,7 +171,7 @@ public class HighlighterPhraseTest extends TestCase {
final String TEXT = "the fox did not jump";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
@@ -211,7 +213,7 @@ public class HighlighterPhraseTest extends TestCase {
final String TEXT = "the fox did not jump";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, TEXT, Store.YES, Index.ANALYZED,
@@ -251,7 +253,7 @@ public class HighlighterPhraseTest extends TestCase {
final String TEXT = "the fox did not jump";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
@@ -322,6 +324,7 @@ public class HighlighterPhraseTest extends TestCase {
return true;
}
+ @Override
public void reset() {
this.i = -1;
this.tokens = new Token[] {
@@ -367,6 +370,7 @@ public class HighlighterPhraseTest extends TestCase {
return true;
}
+ @Override
public void reset() {
this.i = -1;
this.tokens = new Token[] {
diff --git a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
index 9e79076d05e..02d9375da66 100644
--- a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
+++ b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
@@ -79,6 +79,7 @@ import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
@@ -89,7 +90,7 @@ import org.w3c.dom.NodeList;
*/
public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter {
// TODO: change to CURRENT, does not work because posIncr:
- static final Version TEST_VERSION = Version.LUCENE_CURRENT;
+ static final Version TEST_VERSION = TEST_VERSION_CURRENT;
private IndexReader reader;
static final String FIELD_NAME = "contents";
@@ -118,7 +119,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
public void testQueryScorerHits() throws Exception {
- Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParser qp = new QueryParser(TEST_VERSION, FIELD_NAME, analyzer);
query = qp.parse("\"very long\"");
searcher = new IndexSearcher(ramDir, true);
@@ -226,7 +227,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String f2c = f2 + ":";
String q = "(" + f1c + ph1 + " OR " + f2c + ph1 + ") AND (" + f1c + ph2
+ " OR " + f2c + ph2 + ")";
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
QueryParser qp = new QueryParser(TEST_VERSION, f1, analyzer);
Query query = qp.parse(q);
@@ -374,8 +375,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
highlighter.setTextFragmenter(new SimpleFragmenter(40));
- String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
- "...");
+// String result =
+ highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,"...");
//System.out.println("\t" + result);
}
@@ -1389,9 +1390,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
// highlighting respects fieldnames used in query
Scorer fieldSpecificScorer = null;
- if (mode == this.QUERY) {
+ if (mode == TestHighlightRunner.QUERY) {
fieldSpecificScorer = new QueryScorer(query, FIELD_NAME);
- } else if (mode == this.QUERY_TERM) {
+ } else if (mode == TestHighlightRunner.QUERY_TERM) {
fieldSpecificScorer = new QueryTermScorer(query, "contents");
}
Highlighter fieldSpecificHighlighter = new Highlighter(new SimpleHTMLFormatter(),
@@ -1402,9 +1403,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
// highlighting does not respect fieldnames used in query
Scorer fieldInSpecificScorer = null;
- if (mode == this.QUERY) {
+ if (mode == TestHighlightRunner.QUERY) {
fieldInSpecificScorer = new QueryScorer(query, null);
- } else if (mode == this.QUERY_TERM) {
+ } else if (mode == TestHighlightRunner.QUERY_TERM) {
fieldInSpecificScorer = new QueryTermScorer(query);
}
@@ -1529,64 +1530,64 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
Highlighter highlighter;
String result;
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("foo");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo ", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("10");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi -Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hispeed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
// ///////////////// same tests, just put the bigger overlapping token
// first
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("foo");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo ", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("10");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi -Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hispeed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
@@ -1597,7 +1598,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
private Directory dir = new RAMDirectory();
- private Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ private Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
public void testWeightedTermsWithDeletes() throws IOException, ParseException, InvalidTokenOffsetsException {
makeIndex();
@@ -1762,11 +1763,6 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
- @Override
- protected void tearDown() throws Exception {
- super.tearDown();
- }
-
private static Token createToken(String term, int start, int offset)
{
Token token = new Token(start, offset);
@@ -1801,7 +1797,7 @@ class SynonymAnalyzer extends Analyzer {
*/
@Override
public TokenStream tokenStream(String arg0, Reader arg1) {
- LowerCaseTokenizer stream = new LowerCaseTokenizer(Version.LUCENE_CURRENT, arg1);
+ LowerCaseTokenizer stream = new LowerCaseTokenizer(LuceneTestCase.TEST_VERSION_CURRENT, arg1);
stream.addAttribute(TermAttribute.class);
stream.addAttribute(PositionIncrementAttribute.class);
stream.addAttribute(OffsetAttribute.class);
@@ -1816,7 +1812,6 @@ class SynonymAnalyzer extends Analyzer {
class SynonymTokenizer extends TokenStream {
private TokenStream realStream;
private Token currentRealToken = null;
- private org.apache.lucene.analysis.Token cRealToken = null;
private Map synonyms;
StringTokenizer st = null;
private TermAttribute realTermAtt;
diff --git a/contrib/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java b/contrib/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java
index e0a6c998609..e9bd0a4bf34 100644
--- a/contrib/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java
+++ b/contrib/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java
@@ -43,16 +43,16 @@ public class TestICUCollationKeyFilter extends CollationTestBase {
public class TestAnalyzer extends Analyzer {
- private Collator collator;
+ private Collator _collator;
TestAnalyzer(Collator collator) {
- this.collator = collator;
+ _collator = collator;
}
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream result = new KeywordTokenizer(reader);
- result = new ICUCollationKeyFilter(result, collator);
+ result = new ICUCollationKeyFilter(result, _collator);
return result;
}
}
diff --git a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java
index a62e7c5e32f..5154f267e2b 100644
--- a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java
+++ b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java
@@ -30,7 +30,7 @@ import java.util.Map;
public class InstantiatedDocument
implements Serializable {
- private static long serialVersionUID = 1l;
+ private static final long serialVersionUID = 1l;
private Document document;
diff --git a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
index 797dab3cdd3..fc24e7427d8 100644
--- a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
+++ b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
@@ -16,13 +16,12 @@ package org.apache.lucene.store.instantiated;
* limitations under the License.
*/
+import java.io.Closeable;
import java.io.IOException;
import java.io.Serializable;
-import java.io.Closeable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
diff --git a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
index e6b1d5111b7..290637bc4ec 100644
--- a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
+++ b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
@@ -167,11 +167,9 @@ public class InstantiatedIndexReader extends IndexReader {
}
@Override
- protected void doCommit(Map commitUserData) throws IOException {
+ protected void doCommit(Map<String,String> commitUserData) throws IOException {
// todo: read/write lock
- boolean updated = false;
-
// 1. update norms
if (uncommittedNormsByFieldNameAndDocumentNumber != null) {
for (Map.Entry<String,List<NormUpdate>> e : uncommittedNormsByFieldNameAndDocumentNumber.entrySet()) {
@@ -181,8 +179,6 @@ public class InstantiatedIndexReader extends IndexReader {
}
}
uncommittedNormsByFieldNameAndDocumentNumber = null;
-
- updated = true;
}
// 2. remove deleted documents
@@ -197,9 +193,6 @@ public class InstantiatedIndexReader extends IndexReader {
}
}
uncommittedDeletedDocuments = null;
-
- updated = true;
-
}
// todo unlock read/writelock
diff --git a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
index 74f38cacbfa..5e336ebc296 100644
--- a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
+++ b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
@@ -37,7 +37,6 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@@ -470,7 +469,7 @@ public class InstantiatedIndexWriter implements Closeable {
// normalize settings per field name in document
Map<String, FieldSetting> fieldSettingsByFieldName = new HashMap<String, FieldSetting>();
- for (Fieldable field : (List<Fieldable>) document.getDocument().getFields()) {
+ for (Fieldable field : document.getDocument().getFields()) {
FieldSetting fieldSetting = fieldSettingsByFieldName.get(field.name());
if (fieldSetting == null) {
fieldSetting = new FieldSetting();
@@ -514,7 +513,7 @@ public class InstantiatedIndexWriter implements Closeable {
Map<Fieldable, LinkedList<Token>> tokensByField = new LinkedHashMap<Fieldable, LinkedList<Token>>(20);
// tokenize indexed fields.
- for (Iterator<Fieldable> it = (Iterator<Fieldable>) document.getDocument().getFields().iterator(); it.hasNext();) {
+ for (Iterator<Fieldable> it = document.getDocument().getFields().iterator(); it.hasNext();) {
Fieldable field = it.next();
@@ -526,7 +525,6 @@ public class InstantiatedIndexWriter implements Closeable {
tokensByField.put(field, tokens);
if (field.isTokenized()) {
- int termCounter = 0;
final TokenStream tokenStream;
// todo readerValue(), binaryValue()
if (field.tokenStreamValue() != null) {
diff --git a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java
index b5f33c04373..e50ff74ff9f 100644
--- a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java
+++ b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java
@@ -16,12 +16,10 @@ package org.apache.lucene.store.instantiated;
* limitations under the License.
*/
-import org.apache.lucene.index.Term;
-
import java.io.Serializable;
import java.util.Comparator;
-import java.util.Collections;
-import java.util.Arrays;
+
+import org.apache.lucene.index.Term;
/**
* A term in the inverted index, coupled to the documents it occurs in.
diff --git a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java
index 31d2efc266a..91154c1b69b 100644
--- a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java
+++ b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java
@@ -19,14 +19,10 @@ package org.apache.lucene.store.instantiated;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
-import java.io.IOException;
-import java.util.Arrays;
-
/**
* A {@link org.apache.lucene.index.TermEnum} navigating an {@link org.apache.lucene.store.instantiated.InstantiatedIndexReader}.
*/
-public class InstantiatedTermEnum
- extends TermEnum {
+public class InstantiatedTermEnum extends TermEnum {
private final InstantiatedIndexReader reader;
diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
index c212a406397..8d8d7563645 100644
--- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
+++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
@@ -22,8 +22,6 @@ import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
@@ -42,11 +40,12 @@ import org.apache.lucene.index.TermPositions;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Asserts equality of content and behaviour of two index readers.
*/
-public class TestIndicesEquals extends TestCase {
+public class TestIndicesEquals extends LuceneTestCase {
// public void test2() throws Exception {
// FSDirectory fsdir = FSDirectory.open(new File("/tmp/fatcorpus"));
@@ -61,7 +60,7 @@ public class TestIndicesEquals extends TestCase {
RAMDirectory dir = new RAMDirectory();
// create dir data
- IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 20; i++) {
Document document = new Document();
assembleDocument(document, i);
@@ -85,7 +84,7 @@ public class TestIndicesEquals extends TestCase {
InstantiatedIndex ii = new InstantiatedIndex();
// create dir data
- IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 500; i++) {
Document document = new Document();
assembleDocument(document, i);
@@ -94,7 +93,7 @@ public class TestIndicesEquals extends TestCase {
indexWriter.close();
// test ii writer
- InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true);
+ InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(TEST_VERSION_CURRENT), true);
for (int i = 0; i < 500; i++) {
Document document = new Document();
assembleDocument(document, i);
diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java
index 68ece1fcca0..2ee5dd2920e 100644
--- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java
+++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java
@@ -17,10 +17,9 @@ package org.apache.lucene.store.instantiated;
*/
-import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -30,13 +29,13 @@ import org.apache.lucene.document.Field;
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
-public class TestSerialization extends TestCase {
+public class TestSerialization extends LuceneTestCase {
public void test() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("foo", "bar rab abr bra rba", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("moo", "bar rab abr bra rba", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
index 903192fda8b..81f77d51420 100644
--- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
+++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
@@ -15,16 +15,12 @@ package org.apache.lucene.store.instantiated;
*
*/
-import junit.framework.TestCase;
-
import java.io.IOException;
-import java.util.Map;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -32,21 +28,21 @@ import org.apache.lucene.document.Field;
/**
* @since 2009-mar-30 13:15:49
*/
-public class TestUnoptimizedReaderOnConstructor extends TestCase {
+public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
addDocument(iw, "Hello, world!");
addDocument(iw, "All work and no play makes jack a dull boy");
iw.close();
- iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
addDocument(iw, "Hello, tellus!");
addDocument(iw, "All work and no play makes danny a dull boy");
iw.close();
- iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
addDocument(iw, "Hello, earth!");
addDocument(iw, "All work and no play makes wendy a dull girl");
iw.close();
@@ -54,9 +50,8 @@ public class TestUnoptimizedReaderOnConstructor extends TestCase {
IndexReader unoptimizedReader = IndexReader.open(dir, false);
unoptimizedReader.deleteDocument(2);
- InstantiatedIndex ii;
try {
- ii = new InstantiatedIndex(unoptimizedReader);
+ new InstantiatedIndex(unoptimizedReader);
} catch (Exception e) {
fail("No exceptions when loading an unoptimized reader!");
}
diff --git a/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index 70de1074f95..a04e57c5ea3 100644
--- a/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -793,41 +793,41 @@ public class MemoryIndex implements Serializable {
return new TermEnum() {
- private int i = ix; // index into info.sortedTerms
- private int j = jx; // index into sortedFields
+ private int srtTermsIdx = ix; // index into info.sortedTerms
+ private int srtFldsIdx = jx; // index into sortedFields
@Override
public boolean next() {
if (DEBUG) System.err.println("TermEnum.next");
- if (j >= sortedFields.length) return false;
- Info info = getInfo(j);
- if (++i < info.sortedTerms.length) return true;
+ if (srtFldsIdx >= sortedFields.length) return false;
+ Info info = getInfo(srtFldsIdx);
+ if (++srtTermsIdx < info.sortedTerms.length) return true;
// move to successor
- j++;
- i = 0;
- if (j >= sortedFields.length) return false;
- getInfo(j).sortTerms();
+ srtFldsIdx++;
+ srtTermsIdx = 0;
+ if (srtFldsIdx >= sortedFields.length) return false;
+ getInfo(srtFldsIdx).sortTerms();
return true;
}
@Override
public Term term() {
- if (DEBUG) System.err.println("TermEnum.term: " + i);
- if (j >= sortedFields.length) return null;
- Info info = getInfo(j);
- if (i >= info.sortedTerms.length) return null;
+ if (DEBUG) System.err.println("TermEnum.term: " + srtTermsIdx);
+ if (srtFldsIdx >= sortedFields.length) return null;
+ Info info = getInfo(srtFldsIdx);
+ if (srtTermsIdx >= info.sortedTerms.length) return null;
// if (DEBUG) System.err.println("TermEnum.term: " + i + ", " + info.sortedTerms[i].getKey());
- return createTerm(info, j, info.sortedTerms[i].getKey());
+ return createTerm(info, srtFldsIdx, info.sortedTerms[srtTermsIdx].getKey());
}
@Override
public int docFreq() {
if (DEBUG) System.err.println("TermEnum.docFreq");
- if (j >= sortedFields.length) return 0;
- Info info = getInfo(j);
- if (i >= info.sortedTerms.length) return 0;
- return numPositions(info.getPositions(i));
+ if (srtFldsIdx >= sortedFields.length) return 0;
+ Info info = getInfo(srtFldsIdx);
+ if (srtTermsIdx >= info.sortedTerms.length) return 0;
+ return numPositions(info.getPositions(srtTermsIdx));
}
@Override
diff --git a/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java b/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
index b1d8cc7d1ad..12b60525796 100644
--- a/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
+++ b/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
@@ -31,7 +31,6 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
-import java.util.Set;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
@@ -52,7 +51,6 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.util.Version;
/**
Verifies that Lucene MemoryIndex and RAMDirectory have the same behaviour,
@@ -202,7 +200,6 @@ the^3
public class MemoryIndexTest extends BaseTokenStreamTestCase {
private Analyzer analyzer;
- private boolean fastMode = false;
private final boolean verbose = false;
@@ -271,16 +268,14 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
}
}
- boolean toLowerCase = true;
// boolean toLowerCase = false;
// Set stopWords = null;
- Set<?> stopWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
Analyzer[] analyzers = new Analyzer[] {
- new SimpleAnalyzer(Version.LUCENE_CURRENT),
- new StopAnalyzer(Version.LUCENE_CURRENT),
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
-// new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ new SimpleAnalyzer(TEST_VERSION_CURRENT),
+ new StopAnalyzer(TEST_VERSION_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
+// new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
// new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN, false, null),
// new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN, true, stopWords),
// new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS),
@@ -465,7 +460,8 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
}
}
- private int getMemorySize(Object index) {
+ // for debugging purposes
+ int getMemorySize(Object index) {
if (index instanceof Directory) {
try {
Directory dir = (Directory) index;
@@ -486,7 +482,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
}
private Query parseQuery(String expression) throws ParseException {
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer);
// parser.setPhraseSlop(0);
return parser.parse(expression);
}
@@ -559,7 +555,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
System.arraycopy(output, 0, buffer, 0, len);
return buffer;
} finally {
- if (input != null) input.close();
+ input.close();
}
}
diff --git a/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java b/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
index 779881242ca..46b79d03fef 100644
--- a/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
+++ b/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
@@ -59,7 +59,7 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy
protected long size(SegmentInfo info) throws IOException {
long byteSize = info.sizeInBytes();
float delRatio = (info.docCount <= 0 ? 0.0f : ((float)info.getDelCount() / (float)info.docCount));
- return (info.docCount <= 0 ? byteSize : (long)((float)byteSize * (1.0f - delRatio)));
+ return (info.docCount <= 0 ? byteSize : (long)((1.0f - delRatio) * byteSize));
}
public void setPartialExpunge(boolean doPartialExpunge) {
diff --git a/contrib/misc/src/java/org/apache/lucene/misc/ChainedFilter.java b/contrib/misc/src/java/org/apache/lucene/misc/ChainedFilter.java
index 3f7deb6d1ab..a1dfdaef2a0 100644
--- a/contrib/misc/src/java/org/apache/lucene/misc/ChainedFilter.java
+++ b/contrib/misc/src/java/org/apache/lucene/misc/ChainedFilter.java
@@ -25,7 +25,6 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.util.OpenBitSetDISI;
-import org.apache.lucene.util.SortedVIntList;
/**
*
diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java b/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
index 841c1184327..17ba8ff8c08 100644
--- a/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
+++ b/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
@@ -20,8 +20,6 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Arrays;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -34,12 +32,13 @@ import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Tests changing of field norms with a custom similarity and with fake norms.
*/
-public class TestFieldNormModifier extends TestCase {
+public class TestFieldNormModifier extends LuceneTestCase {
+
public TestFieldNormModifier(String name) {
super(name);
}
@@ -57,8 +56,9 @@ public class TestFieldNormModifier extends TestCase {
};
@Override
- public void setUp() throws Exception {
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ protected void setUp() throws Exception {
+ super.setUp();
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
index 620f7a3d6f5..0a96fc80f4f 100644
--- a/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
+++ b/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
@@ -23,7 +23,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
public class TestIndexSplitter extends LuceneTestCase {
@@ -36,7 +35,7 @@ public class TestIndexSplitter extends LuceneTestCase {
_TestUtil.rmDir(destDir);
destDir.mkdirs();
FSDirectory fsDir = FSDirectory.open(dir);
- IndexWriter iw = new IndexWriter(fsDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(fsDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
for (int x=0; x < 100; x++) {
Document doc = TestIndexWriterReader.createDocument(x, "index", 5);
iw.addDocument(doc);
diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java b/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
index 8a7eb3a7e35..04a312a74a4 100644
--- a/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
+++ b/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
@@ -22,18 +22,17 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-import junit.framework.TestCase;
-
-public class TestMultiPassIndexSplitter extends TestCase {
+public class TestMultiPassIndexSplitter extends LuceneTestCase {
IndexReader input;
int NUM_DOCS = 11;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
+ super.setUp();
RAMDirectory dir = new RAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.LIMITED);
Document doc;
for (int i = 0; i < NUM_DOCS; i++) {
diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java b/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
index f8a15990e03..e4e794a3bfb 100644
--- a/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
+++ b/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
@@ -1,12 +1,11 @@
package org.apache.lucene.index;
-import junit.framework.TestCase;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import java.util.Collections;
/*
@@ -24,13 +23,12 @@ import java.util.Collections;
*
*/
-
-public class TestTermVectorAccessor extends TestCase {
+public class TestTermVectorAccessor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc;
diff --git a/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java b/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java
index 92d95ffb32f..d6c6a8578fb 100644
--- a/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java
+++ b/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java
@@ -20,8 +20,6 @@ package org.apache.lucene.misc;
import java.util.Calendar;
import java.util.GregorianCalendar;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -43,9 +41,9 @@ import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class ChainedFilterTest extends TestCase {
+public class ChainedFilterTest extends LuceneTestCase {
public static final int MAX = 500;
private RAMDirectory directory;
@@ -57,10 +55,11 @@ public class ChainedFilterTest extends TestCase {
private QueryWrapperFilter sueFilter;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
IndexWriter writer =
- new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Calendar cal = new GregorianCalendar();
cal.clear();
@@ -188,7 +187,7 @@ public class ChainedFilterTest extends TestCase {
public void testWithCachingFilter() throws Exception {
Directory dir = new RAMDirectory();
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, analyzer, true, MaxFieldLength.LIMITED);
writer.close();
diff --git a/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java b/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
index 55d513a8168..1830291f587 100644
--- a/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
+++ b/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
@@ -18,30 +18,10 @@
package org.apache.lucene.misc;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.DefaultSimilarity;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.DisjunctionMaxQuery;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanClause.Occur;
-
-import junit.framework.Test;
import junit.framework.TestCase;
-import junit.framework.TestSuite;
-import java.io.File;
-import java.math.BigDecimal;
-import java.util.Random;
-import java.util.Date;
-import java.util.List;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
/**
* Test of the SweetSpotSimilarity
diff --git a/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java b/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
index da80777a08a..8c49e2063bf 100644
--- a/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
+++ b/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
@@ -19,8 +19,6 @@ package org.apache.lucene.misc;
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -37,12 +35,13 @@ import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Tests changing the norms after changing the similarity
*/
-public class TestLengthNormModifier extends TestCase {
+public class TestLengthNormModifier extends LuceneTestCase {
+
public TestLengthNormModifier(String name) {
super(name);
}
@@ -60,8 +59,9 @@ public class TestLengthNormModifier extends TestCase {
};
@Override
- public void setUp() throws Exception {
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ protected void setUp() throws Exception {
+ super.setUp();
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
diff --git a/contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java b/contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java
index f570728fd85..8973e15ca80 100644
--- a/contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java
+++ b/contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java
@@ -19,8 +19,6 @@ package org.apache.lucene.queryParser.analyzing;
import java.io.Reader;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.ASCIIFoldingFilter;
import org.apache.lucene.analysis.LowerCaseFilter;
@@ -28,12 +26,12 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* @version $Revision$, $Date$
*/
-public class TestAnalyzingQueryParser extends TestCase {
+public class TestAnalyzingQueryParser extends LuceneTestCase {
private Analyzer a;
@@ -47,7 +45,8 @@ public class TestAnalyzingQueryParser extends TestCase {
private String[] fuzzyExpected;
@Override
- public void setUp() {
+ protected void setUp() throws Exception {
+ super.setUp();
wildcardInput = new String[] { "übersetzung über*ung",
"Mötley Cr\u00fce Mötl?* Crü?", "Renée Zellweger Ren?? Zellw?ger" };
wildcardExpected = new String[] { "ubersetzung uber*ung", "motley crue motl?* cru?",
@@ -99,7 +98,7 @@ public class TestAnalyzingQueryParser extends TestCase {
}
private String parseWithAnalyzingQueryParser(String s, Analyzer a) throws ParseException {
- AnalyzingQueryParser qp = new AnalyzingQueryParser(Version.LUCENE_CURRENT, "field", a);
+ AnalyzingQueryParser qp = new AnalyzingQueryParser(TEST_VERSION_CURRENT, "field", a);
org.apache.lucene.search.Query q = qp.parse(s);
return q.toString("field");
}
@@ -112,10 +111,10 @@ class ASCIIAnalyzer extends org.apache.lucene.analysis.Analyzer {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(LuceneTestCase.TEST_VERSION_CURRENT, reader);
result = new StandardFilter(result);
result = new ASCIIFoldingFilter(result);
- result = new LowerCaseFilter(result);
+ result = new LowerCaseFilter(LuceneTestCase.TEST_VERSION_CURRENT, result);
return result;
}
}
diff --git a/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java b/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
index a024871e1cb..d84e24c71f6 100644
--- a/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
+++ b/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
@@ -19,8 +19,6 @@ package org.apache.lucene.queryParser.complexPhrase;
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@@ -33,11 +31,11 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class TestComplexPhraseQuery extends TestCase {
+public class TestComplexPhraseQuery extends LuceneTestCase {
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
DocData docsContent[] = { new DocData("john smith", "1"),
new DocData("johathon smith", "2"),
@@ -72,7 +70,7 @@ public class TestComplexPhraseQuery extends TestCase {
}
private void checkBadQuery(String qString) {
- QueryParser qp = new ComplexPhraseQueryParser(Version.LUCENE_CURRENT, defaultFieldName, analyzer);
+ QueryParser qp = new ComplexPhraseQueryParser(TEST_VERSION_CURRENT, defaultFieldName, analyzer);
Throwable expected = null;
try {
qp.parse(qString);
@@ -85,7 +83,7 @@ public class TestComplexPhraseQuery extends TestCase {
private void checkMatches(String qString, String expectedVals)
throws Exception {
- QueryParser qp = new ComplexPhraseQueryParser(Version.LUCENE_CURRENT, defaultFieldName, analyzer);
+ QueryParser qp = new ComplexPhraseQueryParser(TEST_VERSION_CURRENT, defaultFieldName, analyzer);
qp.setFuzzyPrefixLength(1); // usually a good idea
Query q = qp.parse(qString);
@@ -113,6 +111,7 @@ public class TestComplexPhraseQuery extends TestCase {
@Override
protected void setUp() throws Exception {
+ super.setUp();
RAMDirectory rd = new RAMDirectory();
IndexWriter w = new IndexWriter(rd, analyzer, MaxFieldLength.UNLIMITED);
for (int i = 0; i < docsContent.length; i++) {
@@ -130,6 +129,7 @@ public class TestComplexPhraseQuery extends TestCase {
@Override
protected void tearDown() throws Exception {
searcher.close();
+ super.tearDown();
}
static class DocData {
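
The setUp/tearDown edits in this file follow one discipline for JUnit 3-style LuceneTestCase subclasses: call super.setUp() before touching any fixture state, and call super.tearDown() only after local cleanup. A compact sketch of that shape, with illustrative names (the directory field stands in for whatever the test allocates):

    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.LuceneTestCase;

    public class FixtureLifecycleTest extends LuceneTestCase {
      private RAMDirectory directory;

      @Override
      protected void setUp() throws Exception {
        super.setUp();                  // framework initialization first
        directory = new RAMDirectory(); // then the test's own fixture
      }

      public void testFixtureExists() {
        assertNotNull(directory);
      }

      @Override
      protected void tearDown() throws Exception {
        directory.close();              // release local resources first...
        super.tearDown();               // ...framework cleanup last
      }
    }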
diff --git a/contrib/misc/src/test/org/apache/lucene/queryParser/ext/TestExtendableQueryParser.java b/contrib/misc/src/test/org/apache/lucene/queryParser/ext/TestExtendableQueryParser.java
index 78796d73a5a..e465e64286c 100644
--- a/contrib/misc/src/test/org/apache/lucene/queryParser/ext/TestExtendableQueryParser.java
+++ b/contrib/misc/src/test/org/apache/lucene/queryParser/ext/TestExtendableQueryParser.java
@@ -26,7 +26,6 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.Version;
/**
* Test case for the class {@link ExtendableQueryParser}
@@ -47,10 +46,10 @@ public class TestExtendableQueryParser extends TestQueryParser {
public QueryParser getParser(Analyzer a, Extensions extensions)
throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParser qp = extensions == null ? new ExtendableQueryParser(
- Version.LUCENE_CURRENT, "field", a) : new ExtendableQueryParser(
- Version.LUCENE_CURRENT, "field", a, extensions);
+ TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser(
+ TEST_VERSION_CURRENT, "field", a, extensions);
qp.setDefaultOperator(QueryParser.OR_OPERATOR);
return qp;
}
diff --git a/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java b/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
index 8fe8c53e8c9..961fe51bb4f 100644
--- a/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
+++ b/contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
@@ -36,7 +36,6 @@ import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.Reader;
@@ -100,7 +99,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
/** Filters LowerCaseTokenizer with StopFilter. */
@Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -123,14 +122,14 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
public PrecedenceQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
qp.setDefaultOperator(PrecedenceQueryParser.OR_OPERATOR);
return qp;
@@ -175,7 +174,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
public Query getQueryDOA(String query, Analyzer a)
throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
return qp.parse(query);
@@ -241,7 +240,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
"+(title:dog title:cat) -author:\"bob dole\"");
- PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer(TEST_VERSION_CURRENT));
// make sure OR is the default:
assertEquals(PrecedenceQueryParser.OR_OPERATOR, qp.getDefaultOperator());
qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
@@ -255,7 +254,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
}
public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -275,7 +274,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
- Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -413,7 +412,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
}
public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
/*assertQueryEquals("\\[brackets", a, "\\[brackets");
assertQueryEquals("\\[brackets", null, "brackets");
@@ -518,7 +517,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
public void testBoost()
throws Exception {
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.singleton("on"));
+ StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.singleton("on"));
PrecedenceQueryParser qp = new PrecedenceQueryParser("field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
@@ -531,7 +530,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
q = qp.parse("\"on\"^1.0");
assertNotNull(q);
- q = getParser(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)).parse("the^3");
+ q = getParser(new StandardAnalyzer(TEST_VERSION_CURRENT)).parse("the^3");
assertNotNull(q);
}
@@ -545,7 +544,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
public void testCustomQueryParserWildcard() {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t");
} catch (ParseException expected) {
return;
}
@@ -554,7 +553,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
public void testCustomQueryParserFuzzy() throws Exception {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~");
} catch (ParseException expected) {
return;
}
@@ -564,7 +563,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
- getParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("one two three");
+ getParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("one two three");
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
// too many boolean clauses, so ParseException is expected
@@ -578,7 +577,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
// failing tests disabled since PrecedenceQueryParser
// is currently unmaintained
public void _testPrecedence() throws Exception {
- PrecedenceQueryParser parser = getParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ PrecedenceQueryParser parser = getParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query1 = parser.parse("A AND B OR C AND D");
Query query2 = parser.parse("(A AND B) OR (C AND D)");
assertEquals(query1, query2);
@@ -606,8 +605,9 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
@Override
- public void tearDown() {
+ protected void tearDown() throws Exception {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
+ super.tearDown();
}
}
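
The tearDown edit above both narrows the signature to the protected, throws Exception form used across the patch and keeps the restore of BooleanQuery's clause limit ahead of super.tearDown(). Since that limit is static and JVM-global, a test that lowers it must snapshot and restore it or later tests inherit the change. A hedged sketch of the save/restore pattern, with an illustrative test method:

    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.util.LuceneTestCase;

    public class ClauseLimitTest extends LuceneTestCase {
      private int originalMaxClauses;

      @Override
      protected void setUp() throws Exception {
        super.setUp();
        originalMaxClauses = BooleanQuery.getMaxClauseCount(); // snapshot global state
      }

      public void testLimitIsEnforced() {
        BooleanQuery.setMaxClauseCount(2); // test-local change to static state
        // ... exercise a parser here and expect failure past two clauses ...
      }

      @Override
      protected void tearDown() throws Exception {
        BooleanQuery.setMaxClauseCount(originalMaxClauses); // undo before the framework runs
        super.tearDown();
      }
    }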
diff --git a/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java b/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
index adcbee6e2bf..479e6cfd7ce 100644
--- a/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
+++ b/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
@@ -19,8 +19,6 @@ package org.apache.lucene.search;
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -28,18 +26,17 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class BooleanFilterTest extends TestCase
-{
+public class BooleanFilterTest extends LuceneTestCase {
private RAMDirectory directory;
private IndexReader reader;
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
//Add series of docs with filterable fields: access rights, prices, dates and "in-stock" flags
addDoc(writer, "admin guest", "010", "20040101","Y");
diff --git a/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java b/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
index 514d1fbaf4f..b0a4c961e4e 100644
--- a/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
+++ b/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
@@ -20,8 +20,6 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -30,9 +28,9 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
-public class DuplicateFilterTest extends TestCase
-{
+public class DuplicateFilterTest extends LuceneTestCase {
private static final String KEY_FIELD = "url";
private RAMDirectory directory;
private IndexReader reader;
@@ -40,10 +38,10 @@ public class DuplicateFilterTest extends TestCase
private IndexSearcher searcher;
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
//Add series of docs with filterable fields: url, text and dates
addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
@@ -62,11 +60,11 @@ public class DuplicateFilterTest extends TestCase
}
@Override
- protected void tearDown() throws Exception
- {
+ protected void tearDown() throws Exception {
reader.close();
searcher.close();
directory.close();
+ super.tearDown();
}
private void addDoc(IndexWriter writer, String url, String text, String date) throws IOException
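
The filter tests above share a fixture idiom worth spelling out: build a RAMDirectory in setUp(), populate it through a small addDoc helper, and keep every version-aware constructor on TEST_VERSION_CURRENT. A self-contained sketch under those assumptions; the field names and sample values are illustrative, not taken from the patch:

    import java.io.IOException;

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.LuceneTestCase;

    public class FilterFixtureTest extends LuceneTestCase {
      private RAMDirectory directory;

      @Override
      protected void setUp() throws Exception {
        super.setUp();
        directory = new RAMDirectory();
        IndexWriter writer = new IndexWriter(directory,
            new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
            IndexWriter.MaxFieldLength.UNLIMITED);
        addDoc(writer, "http://lucene.apache.org", "20040101");
        writer.close();
      }

      // Helper mirrors the addDoc pattern these tests use.
      private void addDoc(IndexWriter writer, String url, String date) throws IOException {
        Document doc = new Document();
        doc.add(new Field("url", url, Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field("date", date, Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.addDocument(doc);
      }

      public void testIndexHasDocs() throws Exception {
        IndexReader reader = IndexReader.open(directory, true);
        assertEquals(1, reader.numDocs());
        reader.close();
      }

      @Override
      protected void tearDown() throws Exception {
        directory.close();
        super.tearDown();
      }
    }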
diff --git a/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java b/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
index 64c83c30153..6b553d4f127 100644
--- a/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
+++ b/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
@@ -20,8 +20,6 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -30,17 +28,16 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class FuzzyLikeThisQueryTest extends TestCase
-{
+public class FuzzyLikeThisQueryTest extends LuceneTestCase {
private RAMDirectory directory;
private IndexSearcher searcher;
- private Analyzer analyzer=new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ private Analyzer analyzer=new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, analyzer,true, MaxFieldLength.UNLIMITED);
@@ -115,7 +112,7 @@ public class FuzzyLikeThisQueryTest extends TestCase
}
public void testFuzzyLikeThisQueryEquals() {
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
FuzzyLikeThisQuery fltq1 = new FuzzyLikeThisQuery(10, analyzer);
fltq1.addTerms("javi", "subject", 0.5f, 2);
FuzzyLikeThisQuery fltq2 = new FuzzyLikeThisQuery(10, analyzer);
diff --git a/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java b/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
index 4c4823c98df..58630d2e78a 100644
--- a/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
+++ b/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
@@ -19,8 +19,6 @@ package org.apache.lucene.search;
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -29,11 +27,11 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.OpenBitSet;
-import org.apache.lucene.util.Version;
-public class TermsFilterTest extends TestCase
-{
+public class TermsFilterTest extends LuceneTestCase {
+
public void testCachability() throws Exception
{
TermsFilter a=new TermsFilter();
@@ -56,7 +54,7 @@ public class TermsFilterTest extends TestCase
{
String fieldName="field1";
RAMDirectory rd=new RAMDirectory();
- IndexWriter w=new IndexWriter(rd,new WhitespaceAnalyzer(Version.LUCENE_CURRENT),MaxFieldLength.UNLIMITED);
+ IndexWriter w=new IndexWriter(rd,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),MaxFieldLength.UNLIMITED);
for (int i = 0; i < 100; i++)
{
Document doc=new Document();
diff --git a/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java b/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
index ca20834ff48..597df2488cc 100644
--- a/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
+++ b/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
@@ -43,8 +43,9 @@ public class TestMoreLikeThis extends LuceneTestCase {
@Override
protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(TEST_VERSION_CURRENT),
true, MaxFieldLength.UNLIMITED);
// Add series of docs with specific information for MoreLikeThis
@@ -62,6 +63,7 @@ public class TestMoreLikeThis extends LuceneTestCase {
reader.close();
searcher.close();
directory.close();
+ super.tearDown();
}
private void addDoc(IndexWriter writer, String text) throws IOException {
@@ -96,7 +98,7 @@ public class TestMoreLikeThis extends LuceneTestCase {
for (int i = 0; i < clauses.size(); i++) {
BooleanClause clause = clauses.get(i);
TermQuery tq = (TermQuery) clause.getQuery();
- Float termBoost = (Float) originalValues.get(tq.getTerm().text());
+ Float termBoost = originalValues.get(tq.getTerm().text());
assertNotNull("Expected term " + tq.getTerm().text(), termBoost);
float totalBoost = termBoost.floatValue() * boostFactor;
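
The cast removal above is a typical generics cleanup: once originalValues is declared with a Float value type, get() already returns Float, so the explicit (Float) cast is redundant and only generates a compiler warning. A tiny standalone illustration with made-up names:

    import java.util.HashMap;
    import java.util.Map;

    class CastCleanupDemo {
      public static void main(String[] args) {
        Map<String, Float> originalValues = new HashMap<String, Float>();
        originalValues.put("lucene", 2.0f);

        Float termBoost = originalValues.get("lucene"); // typed by the map, no cast
        float totalBoost = termBoost.floatValue() * 5.0f;
        System.out.println(totalBoost); // prints 10.0
      }
    }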
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java
index be44c7c7a96..ad22c198928 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java
@@ -45,7 +45,7 @@ import org.apache.lucene.util.AttributeSource;
*/
public abstract class QueryConfigHandler extends AttributeSource {
- private LinkedList listeners = new LinkedList();;
+ private LinkedList listeners = new LinkedList();
/**
* Returns an implementation of
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java
index 8ce459f57b9..8cb3db59401 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java
@@ -77,7 +77,7 @@ public class MultiFieldQueryParserWrapper extends QueryParserWrapper {
@SuppressWarnings("unchecked")
public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer, Map boosts) {
this(fields, analyzer);
- StandardQueryParser qpHelper = (StandardQueryParser) getQueryParserHelper();
+ StandardQueryParser qpHelper = getQueryParserHelper();
qpHelper.setMultiFields(fields);
qpHelper.setFieldsBoost(boosts);
@@ -113,7 +113,7 @@ public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer, Map boos
public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer) {
super(null, analyzer);
- StandardQueryParser qpHelper = (StandardQueryParser) getQueryParserHelper();
+ StandardQueryParser qpHelper = getQueryParserHelper();
qpHelper.setAnalyzer(analyzer);
qpHelper.setMultiFields(fields);
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java
index 51c6ca865ec..a5783d72dc7 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java
@@ -345,7 +345,7 @@ public class QueryParserWrapper {
try {
QueryNode queryTree = this.syntaxParser.parse(query, getField());
queryTree = this.processorPipeline.process(queryTree);
- return (Query) this.builder.build(queryTree);
+ return this.builder.build(queryTree);
} catch (QueryNodeException e) {
throw new ParseException("parse exception");
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java
index c7c2f80028f..fbcc46432c7 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java
@@ -62,7 +62,7 @@ public class BoostAttributeImpl extends AttributeImpl
@Override
public boolean equals(Object other) {
- if (other instanceof BoostAttributeImpl && other != null
+ if (other instanceof BoostAttributeImpl
&& ((BoostAttributeImpl) other).boost == this.boost) {
return true;
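
The equals() hunks in these config attribute classes all delete the same conjunct, and the deletion is sound: Java defines x instanceof T as false whenever x is null, so an instanceof guard already implies non-null and the extra other != null test can never change the outcome. A two-line demonstration:

    class InstanceofNullDemo {
      public static void main(String[] args) {
        Object other = null;
        System.out.println(other instanceof String); // false, and never throws
      }
    }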
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java
index fdc24a801a3..1f0ee2bf6c7 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java
@@ -62,7 +62,6 @@ public class DefaultPhraseSlopAttributeImpl extends AttributeImpl
public boolean equals(Object other) {
if (other instanceof DefaultPhraseSlopAttributeImpl
- && other != null
&& ((DefaultPhraseSlopAttributeImpl) other).defaultPhraseSlop == this.defaultPhraseSlop) {
return true;
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java
index df04e633c11..853b4ce5098 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java
@@ -66,7 +66,7 @@ public class FieldBoostMapAttributeImpl extends AttributeImpl
@Override
public boolean equals(Object other) {
- if (other instanceof FieldBoostMapAttributeImpl && other != null
+ if (other instanceof FieldBoostMapAttributeImpl
&& ((FieldBoostMapAttributeImpl) other).boosts.equals(this.boosts) ) {
return true;
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java
index 76a2a78d092..ae9015e3f0f 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java
@@ -63,7 +63,7 @@ public class FieldDateResolutionMapAttributeImpl extends AttributeImpl
@Override
public boolean equals(Object other) {
- if (other instanceof FieldDateResolutionMapAttributeImpl && other != null
+ if (other instanceof FieldDateResolutionMapAttributeImpl
&& ((FieldDateResolutionMapAttributeImpl) other).dateRes.equals(this.dateRes) ) {
return true;
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java
index 5bd16025086..4cb85c4a6e3 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java
@@ -72,7 +72,7 @@ public class FuzzyAttributeImpl extends AttributeImpl
@Override
public boolean equals(Object other) {
- if (other instanceof FuzzyAttributeImpl && other != null
+ if (other instanceof FuzzyAttributeImpl
&& ((FuzzyAttributeImpl) other).prefixLength == this.prefixLength) {
return true;
diff --git a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java
index 495b9319a7b..529d459d9a9 100644
--- a/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java
+++ b/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java
@@ -61,7 +61,6 @@ public class PositionIncrementsAttributeImpl extends AttributeImpl
public boolean equals(Object other) {
if (other instanceof PositionIncrementsAttributeImpl
- && other != null
&& ((PositionIncrementsAttributeImpl) other).positionIncrementsEnabled == this.positionIncrementsEnabled) {
return true;
diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java
index 7e070c9e08b..111e3c04dbd 100644
--- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java
+++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java
@@ -17,12 +17,10 @@ package org.apache.lucene.queryParser.standard;
* limitations under the License.
*/
-import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -31,10 +29,8 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.queryParser.core.QueryNodeException;
-import org.apache.lucene.queryParser.standard.StandardQueryParser;
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test; it was adapted
@@ -156,9 +152,9 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
@@ -226,9 +222,9 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestPosIncrementFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java
index c1a05b6c27f..d8760365361 100644
--- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java
+++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java
@@ -17,12 +17,10 @@ package org.apache.lucene.queryParser.standard;
* limitations under the License.
*/
-import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -31,9 +29,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.standard.QueryParserWrapper;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test; it was adapted
@@ -150,9 +146,9 @@ public class TestMultiAnalyzerWrapper extends LuceneTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
@@ -220,9 +216,9 @@ public class TestMultiAnalyzerWrapper extends LuceneTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestPosIncrementFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
index a53eef3bd28..537b2c4b63b 100644
--- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
+++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
@@ -22,14 +22,12 @@ import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.core.QueryNodeException;
-import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler;
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
@@ -83,7 +81,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
String[] fields = { "b", "t" };
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
- mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ mfqp.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
Query q = mfqp.parse("one", null);
assertEquals("b:one t:one", q.toString());
@@ -153,7 +151,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setFieldsBoost(boosts);
- mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ mfqp.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
// Check for simple
Query q = mfqp.parse("one", null);
@@ -181,24 +179,24 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
public void testStaticMethod1() throws QueryNodeException {
String[] fields = { "b", "t" };
String[] queries = { "one", "two" };
- Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one t:two", q.toString());
String[] queries2 = { "+one", "+two" };
- q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = { "one", "+two" };
- q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = { "one +more", "+two" };
- q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = { "blah" };
try {
- q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -222,15 +220,15 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:one", q.toString());
- q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
- q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -243,19 +241,19 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur.MUST_NOT };
StandardQueryParser parser = new StandardQueryParser();
parser.setMultiFields(fields);
- parser.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ parser.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
Query q = QueryParserUtil.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new
+ new StandardAnalyzer(TEST_VERSION_CURRENT));// , fields, flags, new
// StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
- q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
- q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -268,13 +266,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
Query q = QueryParserUtil.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
- .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ .parse(queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -287,13 +285,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
- .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ .parse(queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -319,7 +317,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
}
public void testStopWordSearching() throws Exception {
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
Directory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -345,7 +343,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
* Return empty tokens for field "f1".
*/
private static class AnalyzerReturningNull extends Analyzer {
- StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer stdAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
public AnalyzerReturningNull() {
}
diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
index e3fd21dceaf..264a98b6501 100644
--- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
+++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
@@ -22,7 +22,6 @@ import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@@ -78,7 +77,7 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
public void testSimple() throws Exception {
String[] fields = { "b", "t" };
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
- fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
Query q = mfqp.parse("one");
assertEquals("b:one t:one", q.toString());
@@ -146,7 +145,7 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
boosts.put("t", Float.valueOf(10));
String[] fields = { "b", "t" };
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
- fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
+ fields, new StandardAnalyzer(TEST_VERSION_CURRENT), boosts);
// Check for simple
Query q = mfqp.parse("one");
@@ -175,28 +174,28 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
String[] fields = { "b", "t" };
String[] queries = { "one", "two" };
Query q = MultiFieldQueryParserWrapper.parse(queries, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one t:two", q.toString());
String[] queries2 = { "+one", "+two" };
q = MultiFieldQueryParserWrapper.parse(queries2, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = { "one", "+two" };
q = MultiFieldQueryParserWrapper.parse(queries3, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = { "one +more", "+two" };
q = MultiFieldQueryParserWrapper.parse(queries4, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = { "blah" };
try {
q = MultiFieldQueryParserWrapper.parse(queries5, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -220,17 +219,17 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:one", q.toString());
q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -243,21 +242,19 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
// MultiFieldQueryParserWrapper.PROHIBITED_FIELD};
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
- MultiFieldQueryParserWrapper parser = new MultiFieldQueryParserWrapper(
- fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new StandardAnalyzer());
+ new StandardAnalyzer(TEST_VERSION_CURRENT));// , fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -270,13 +267,13 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -289,13 +286,13 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -319,7 +316,7 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
}
public void testStopWordSearching() throws Exception {
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
Directory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -343,7 +340,7 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
* Return empty tokens for field "f1".
*/
private static class AnalyzerReturningNull extends Analyzer {
- StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer stdAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
public AnalyzerReturningNull() {
}
diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
index 02e305748d2..965c26affd6 100644
--- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
+++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
@@ -38,7 +38,6 @@ import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -78,7 +77,6 @@ import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test; it was adapted
@@ -144,7 +142,7 @@ public class TestQPHelper extends LocalizedTestCase {
/** Filters LowerCaseTokenizer with StopFilter. */
@Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -197,14 +195,14 @@ public class TestQPHelper extends LocalizedTestCase {
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
public StandardQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
@@ -294,7 +292,7 @@ public class TestQPHelper extends LocalizedTestCase {
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.AND);
@@ -314,7 +312,7 @@ public class TestQPHelper extends LocalizedTestCase {
}
public void testConstantScoreAutoRewrite() throws Exception {
- StandardQueryParser qp = new StandardQueryParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ StandardQueryParser qp = new StandardQueryParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query q = qp.parse("foo*bar", "field");
assertTrue(q instanceof WildcardQuery);
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((MultiTermQuery) q).getRewriteMethod());
@@ -339,9 +337,9 @@ public class TestQPHelper extends LocalizedTestCase {
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
- assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
"t�rm term term");
- assertQueryEquals("�mlaut", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "�mlaut");
+ assertQueryEquals("�mlaut", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "�mlaut");
assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:");
@@ -398,7 +396,7 @@ public class TestQPHelper extends LocalizedTestCase {
}
public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -419,7 +417,7 @@ public class TestQPHelper extends LocalizedTestCase {
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
- Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -573,7 +571,7 @@ public class TestQPHelper extends LocalizedTestCase {
public void testFarsiRangeCollating() throws Exception {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
@@ -583,7 +581,7 @@ public class TestQPHelper extends LocalizedTestCase {
IndexSearcher is = new IndexSearcher(ramDir, true);
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the
@@ -737,7 +735,7 @@ public class TestQPHelper extends LocalizedTestCase {
}
public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
/*
* assertQueryEquals("\\[brackets", a, "\\[brackets");
@@ -836,7 +834,7 @@ public class TestQPHelper extends LocalizedTestCase {
}
public void testQueryStringEscaping() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@@ -905,7 +903,7 @@ public class TestQPHelper extends LocalizedTestCase {
}
public void testBoost() throws Exception {
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.singleton("on"));
+ StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.singleton("on"));
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(oneStopAnalyzer);
@@ -921,7 +919,7 @@ public class TestQPHelper extends LocalizedTestCase {
assertNotNull(q);
StandardQueryParser qp2 = new StandardQueryParser();
- qp2.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ qp2.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
q = qp2.parse("the^3", "field");
// "the" is a stop word so the result is an empty query:
@@ -951,7 +949,7 @@ public class TestQPHelper extends LocalizedTestCase {
public void testCustomQueryParserWildcard() {
try {
- new QPTestParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t", "contents");
+ new QPTestParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t", "contents");
fail("Wildcard queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@@ -960,7 +958,7 @@ public class TestQPHelper extends LocalizedTestCase {
public void testCustomQueryParserFuzzy() throws Exception {
try {
- new QPTestParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~", "contents");
+ new QPTestParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~", "contents");
fail("Fuzzy queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@@ -971,7 +969,7 @@ public class TestQPHelper extends LocalizedTestCase {
BooleanQuery.setMaxClauseCount(2);
try {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.parse("one two three", "field");
fail("ParseException expected due to too many boolean clauses");
@@ -985,7 +983,7 @@ public class TestQPHelper extends LocalizedTestCase {
*/
public void testPrecedence() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query1 = qp.parse("A AND B OR C AND D", "field");
Query query2 = qp.parse("+A +B +C +D", "field");
@@ -996,7 +994,7 @@ public class TestQPHelper extends LocalizedTestCase {
public void testLocalDateFormat() throws IOException, QueryNodeException {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
@@ -1077,7 +1075,7 @@ public class TestQPHelper extends LocalizedTestCase {
public void testStopwords() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(
- new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "foo" )));
+ new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo" )));
Query result = qp.parse("a:the OR a:foo", "a");
assertNotNull("result is null and it shouldn't be", result);
@@ -1100,7 +1098,7 @@ public class TestQPHelper extends LocalizedTestCase {
public void testPositionIncrement() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(
- new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "in", "are", "this" )));
+ new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this" )));
qp.setEnablePositionIncrements(true);
@@ -1121,7 +1119,7 @@ public class TestQPHelper extends LocalizedTestCase {
public void testMatchAllDocs() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*", "field"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)", "field"));
@@ -1133,7 +1131,7 @@ public class TestQPHelper extends LocalizedTestCase {
private void assertHits(int expected, String query, IndexSearcher is)
throws IOException, QueryNodeException {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query, "date");
@@ -1153,9 +1151,9 @@ public class TestQPHelper extends LocalizedTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
+ super.tearDown();
}
private class CannedTokenStream extends TokenStream {
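The tearDown hunk above shows a convention this patch applies across the test classes: the override is narrowed back to protected, matching junit.framework.TestCase, and super.tearDown() is moved after the test's own cleanup so the base class runs last, against fully restored global state. A minimal sketch of the pattern, assuming a LuceneTestCase-style base class (the class name is invented):

    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.util.LuceneTestCase;

    public class MaxClauseRestoreTest extends LuceneTestCase {
      private int originalMaxClauses;

      @Override
      protected void setUp() throws Exception {
        super.setUp();                                      // framework setup first
        originalMaxClauses = BooleanQuery.getMaxClauseCount();
      }

      @Override
      protected void tearDown() throws Exception {
        BooleanQuery.setMaxClauseCount(originalMaxClauses); // restore global state...
        super.tearDown();                                   // ...then framework teardown
      }
    }
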
diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
index f12d0db33bb..ee942a631cb 100644
--- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
+++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
@@ -36,7 +36,6 @@ import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -73,7 +72,6 @@ import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test, it was adapted
@@ -139,7 +137,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
/** Filters LowerCaseTokenizer with StopFilter. */
@Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -210,14 +208,14 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
public QueryParserWrapper getParser(Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParserWrapper qp = new QueryParserWrapper("field", a);
qp.setDefaultOperator(QueryParserWrapper.OR_OPERATOR);
return qp;
@@ -302,7 +300,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParserWrapper qp = new QueryParserWrapper("field", a);
qp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
return qp.parse(query);
@@ -329,9 +327,9 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
- assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
"t�rm term term");
- assertQueryEquals("�mlaut", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "�mlaut");
+ assertQueryEquals("�mlaut", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "�mlaut");
assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:");
@@ -386,7 +384,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
"+(title:dog title:cat) -author:\"bob dole\"");
QueryParserWrapper qp = new QueryParserWrapper("field",
- new StandardAnalyzer(Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
// make sure OR is the default:
assertEquals(QueryParserWrapper.OR_OPERATOR, qp.getDefaultOperator());
qp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
@@ -396,7 +394,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
}
public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -417,7 +415,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
- Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -552,7 +550,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
QueryParserWrapper qp = new QueryParserWrapper("field",
- new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ new SimpleAnalyzer(TEST_VERSION_CURRENT));
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
@@ -571,7 +569,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
public void testFarsiRangeCollating() throws Exception {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
@@ -581,7 +579,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParserWrapper qp = new QueryParserWrapper("content",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
@@ -683,7 +681,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
final String monthField = "month";
final String hourField = "hour";
QueryParserWrapper qp = new QueryParserWrapper("field",
- new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ new SimpleAnalyzer(TEST_VERSION_CURRENT));
// Don't set any date resolution and verify if DateField is used
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
@@ -727,7 +725,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
}
public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
/*
* assertQueryEquals("\\[brackets", a, "\\[brackets");
@@ -824,7 +822,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
}
public void testQueryStringEscaping() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@@ -893,7 +891,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
}
public void testBoost() throws Exception {
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.singleton("on"));
+ StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.singleton("on"));
QueryParserWrapper qp = new QueryParserWrapper("field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
@@ -907,7 +905,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
assertNotNull(q);
QueryParserWrapper qp2 = new QueryParserWrapper("field",
- new StandardAnalyzer(Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
q = qp2.parse("the^3");
// "the" is a stop word so the result is an empty query:
assertNotNull(q);
@@ -935,7 +933,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
public void testCustomQueryParserWildcard() {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t");
fail("Wildcard queries should not be allowed");
} catch (ParseException expected) {
// expected exception
@@ -944,7 +942,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
public void testCustomQueryParserFuzzy() throws Exception {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~");
fail("Fuzzy queries should not be allowed");
} catch (ParseException expected) {
// expected exception
@@ -955,7 +953,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
BooleanQuery.setMaxClauseCount(2);
try {
QueryParserWrapper qp = new QueryParserWrapper("field",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.parse("one two three");
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
@@ -968,7 +966,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
*/
public void testPrecedence() throws Exception {
QueryParserWrapper qp = new QueryParserWrapper("field",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query1 = qp.parse("A AND B OR C AND D");
Query query2 = qp.parse("+A +B +C +D");
@@ -978,7 +976,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
public void testLocalDateFormat() throws IOException, ParseException {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
@@ -1057,7 +1055,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
}
public void testStopwords() throws Exception {
- QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "foo")));
+ QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo")));
Query result = qp.parse("a:the OR a:foo");
assertNotNull("result is null and it shouldn't be", result);
assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -1076,7 +1074,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
}
public void testPositionIncrement() throws Exception {
- QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "in", "are", "this")));
+ QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this")));
qp.setEnablePositionIncrements(true);
String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
// 0 2 5 7 8
@@ -1095,7 +1093,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
public void testMatchAllDocs() throws Exception {
QueryParserWrapper qp = new QueryParserWrapper("field",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
BooleanQuery bq = (BooleanQuery) qp.parse("+*:* -*:*");
@@ -1106,7 +1104,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
private void assertHits(int expected, String query, IndexSearcher is)
throws ParseException, IOException {
QueryParserWrapper qp = new QueryParserWrapper("date",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
@@ -1125,9 +1123,9 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
+ super.tearDown();
}
}
diff --git a/contrib/regex/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java b/contrib/regex/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java
index 2e0bab1445f..aed0521dfc0 100644
--- a/contrib/regex/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java
+++ b/contrib/regex/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java
@@ -19,6 +19,7 @@ package org.apache.lucene.search.regex;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BooleanClause;
@@ -51,7 +52,7 @@ public class SpanRegexQuery extends SpanQuery implements RegexQueryCapable {
public Query rewrite(IndexReader reader) throws IOException {
RegexQuery orig = new RegexQuery(term);
orig.setRegexImplementation(regexImpl);
- orig.setRewriteMethod(RegexQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+ orig.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
BooleanQuery bq = (BooleanQuery) orig.rewrite(reader);
BooleanClause[] clauses = bq.getClauses();
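The one-line SpanRegexQuery change above addresses Eclipse's "indirect access to static member" warning: SCORING_BOOLEAN_QUERY_REWRITE is declared on MultiTermQuery, and reaching it through the RegexQuery subclass compiles but obscures where the constant actually lives. A toy illustration (all names invented):

    public class IndirectStaticAccess {
      static class Base { static final int MODE = 1; }
      static class Sub extends Base { }

      public static void main(String[] args) {
        int viaSub  = Sub.MODE;   // legal, but flagged as indirect static access
        int viaBase = Base.MODE;  // the form the patch standardizes on
        System.out.println(viaSub == viaBase);
      }
    }
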
diff --git a/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java b/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
index 070b990cbab..2a09b0c5127 100644
--- a/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
+++ b/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
@@ -17,7 +17,6 @@ package org.apache.lucene.search.regex;
* limitations under the License.
*/
-import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
@@ -29,18 +28,19 @@ import org.apache.lucene.index.TermEnum;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class TestRegexQuery extends TestCase {
+public class TestRegexQuery extends LuceneTestCase {
private IndexSearcher searcher;
private final String FN = "field";
@Override
- public void setUp() {
+ protected void setUp() throws Exception {
+ super.setUp();
RAMDirectory directory = new RAMDirectory();
try {
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
@@ -54,12 +54,9 @@ public class TestRegexQuery extends TestCase {
}
@Override
- public void tearDown() {
- try {
- searcher.close();
- } catch (Exception e) {
- fail(e.toString());
- }
+ protected void tearDown() throws Exception {
+ searcher.close();
+ super.tearDown();
}
private Term newTerm(String value) { return new Term(FN, value); }
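The TestRegexQuery tearDown rewrite drops the try/catch-and-fail idiom: declaring throws Exception lets the original exception propagate to JUnit with its stack trace intact instead of being flattened into fail(e.toString()). A sketch of the resulting shape (the searcher field stands in for any closeable fixture opened in setUp, omitted here):

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.util.LuceneTestCase;

    public class PropagatingTearDownTest extends LuceneTestCase {
      private IndexSearcher searcher;  // assumed to be opened in setUp (omitted)

      @Override
      protected void tearDown() throws Exception {
        // Old style: try { searcher.close(); } catch (Exception e) { fail(e.toString()); }
        // which reduced the failure to a message string. Letting the exception
        // propagate gives JUnit the original cause directly.
        searcher.close();
        super.tearDown();
      }
    }
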
diff --git a/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java b/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
index 4ca2939e366..27f689774ab 100644
--- a/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
+++ b/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
@@ -19,8 +19,6 @@ package org.apache.lucene.search.regex;
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@@ -36,16 +34,17 @@ import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class TestSpanRegexQuery extends TestCase {
+public class TestSpanRegexQuery extends LuceneTestCase {
+
Directory indexStoreA = new RAMDirectory();
Directory indexStoreB = new RAMDirectory();
public void testSpanRegex() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
// doc.add(new Field("field", "the quick brown fox jumps over the lazy dog",
// Field.Store.NO, Field.Index.ANALYZED));
@@ -110,14 +109,14 @@ public class TestSpanRegexQuery extends TestCase {
Field.Index.ANALYZED_NO_NORMS));
// creating first index writer
- IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
writerA.addDocument(lDoc);
writerA.optimize();
writerA.close();
// creating second index writer
- IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
writerB.addDocument(lDoc2);
writerB.optimize();
diff --git a/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java b/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java
index f240f19123a..c718b2d6fef 100644
--- a/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java
+++ b/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java
@@ -18,9 +18,8 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
-import java.util.BitSet;
-import junit.framework.TestCase;
+import junit.framework.Assert;
import org.apache.lucene.index.IndexReader;
@@ -45,11 +44,11 @@ public class RemoteCachingWrapperFilterHelper extends RemoteCachingWrapperFilter
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
Filter cachedFilter = FilterManager.getInstance().getFilter(filter);
- TestCase.assertNotNull("Filter should not be null", cachedFilter);
+ Assert.assertNotNull("Filter should not be null", cachedFilter);
if (!shouldHaveCache) {
- TestCase.assertSame("First time filter should be the same ", filter, cachedFilter);
+ Assert.assertSame("First time filter should be the same ", filter, cachedFilter);
} else {
- TestCase.assertNotSame("We should have a cached version of the filter", filter, cachedFilter);
+ Assert.assertNotSame("We should have a cached version of the filter", filter, cachedFilter);
}
if (filter instanceof CachingWrapperFilterHelper) {
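RemoteCachingWrapperFilterHelper is a Filter, not a TestCase, so it cannot inherit the assert methods; the old code reached them as statics on junit.framework.TestCase. Those statics are actually declared on junit.framework.Assert (TestCase merely extends it), so the patch references the declaring class, mirroring the static-access fix above. A hedged sketch of the idiom (class and method names invented):

    import junit.framework.Assert;

    // A helper used from tests but not itself a TestCase: assertions must be
    // reached as statics, and Assert is where they are declared.
    public class FilterCheck {
      public static void check(Object filter, Object cached) {
        Assert.assertNotNull("Filter should not be null", cached);
        Assert.assertSame("First time filter should be the same", filter, cached);
      }
    }
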
diff --git a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java
index 030b8e11e17..796f9b67892 100644
--- a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java
+++ b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java
@@ -21,7 +21,6 @@ import java.rmi.Naming;
import java.rmi.registry.LocateRegistry;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -58,7 +57,7 @@ public class TestRemoteCachingWrapperFilter extends LuceneTestCase {
private static void startServer() throws Exception {
// construct an index
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
diff --git a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java
index 001c3103c09..950f13b5dbf 100644
--- a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java
+++ b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java
@@ -18,7 +18,6 @@ package org.apache.lucene.search;
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.*;
@@ -59,7 +58,7 @@ public class TestRemoteSearchable extends LuceneTestCase {
private static void startServer() throws Exception {
// construct an index
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(Version.LUCENE_CURRENT),true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(TEST_VERSION_CURRENT),true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
@@ -86,9 +85,9 @@ public class TestRemoteSearchable extends LuceneTestCase {
assertTrue("document is null and it shouldn't be", document != null);
assertEquals("test text", document.get("test"));
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
- Set ftl = new HashSet();
+ Set<String> ftl = new HashSet<String>();

ftl.add("other");
- FieldSelector fs = new SetBasedFieldSelector(ftl, Collections.EMPTY_SET);
+ FieldSelector fs = new SetBasedFieldSelector(ftl, Collections.emptySet());
document = searcher.doc(0, fs);
assertTrue("document is null and it shouldn't be", document != null);
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
diff --git a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java
index 144463a8acc..caf0060b707 100644
--- a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java
+++ b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java
@@ -38,7 +38,6 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
/**
@@ -110,7 +109,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
private Searcher getIndex (boolean even, boolean odd)
throws IOException {
RAMDirectory indexStore = new RAMDirectory ();
- IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(1000);
for (int i=0; i<data.length; ++i) {
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(slotValues[slot]);
}
}
@@ -248,9 +247,9 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
public void testNormalizedScores() throws Exception {
// capture relevancy scores
- HashMap scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
- HashMap scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
- HashMap scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
+ HashMap<String,Float> scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
+ HashMap<String,Float> scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
+ HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
// we'll test searching locally, remote and multi
MultiSearcher remote = new MultiSearcher (new Searchable[] { getRemote() });
@@ -387,9 +386,9 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
assertEquals (expectedResult, buff.toString());
}
- private HashMap getScores (ScoreDoc[] hits, Searcher searcher)
+ private HashMap<String,Float> getScores (ScoreDoc[] hits, Searcher searcher)
throws IOException {
- HashMap scoreMap = new HashMap();
+ HashMap<String,Float> scoreMap = new HashMap<String,Float>();
int n = hits.length;
for (int i=0; i<n; ++i) {
- private void checkSameValues (HashMap m1, HashMap m2) {
+ private void checkSameValues (HashMap<?, ?> m1, HashMap<?, ?> m2) {
int n = m1.size();
int m = m2.size();
assertEquals (n, m);
- Iterator iter = m1.keySet().iterator();
+ Iterator<?> iter = m1.keySet().iterator();
while (iter.hasNext()) {
Object key = iter.next();
Object o1 = m1.get(key);
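The TestRemoteSort hunks generify the score maps: keys are the tracer field values and values are scores, so the maps become HashMap<String,Float>, while checkSameValues only reads entries and can take wildcard HashMap<?, ?> parameters. A condensed, self-contained sketch of that shape (data is invented):

    import java.util.HashMap;
    import java.util.Iterator;

    public class ScoreMapDemo {
      static HashMap<String, Float> getScores() {
        HashMap<String, Float> scoreMap = new HashMap<String, Float>();
        scoreMap.put("doc-1", Float.valueOf(0.42f));
        return scoreMap;
      }

      // Wildcards suffice: the method only reads, never inserts.
      static boolean sameKeys(HashMap<?, ?> m1, HashMap<?, ?> m2) {
        if (m1.size() != m2.size()) return false;
        Iterator<?> iter = m1.keySet().iterator();
        while (iter.hasNext()) {
          if (!m2.containsKey(iter.next())) return false;
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(sameKeys(getScores(), getScores()));
      }
    }
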
diff --git a/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java b/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java
index af20f9f07ef..74e5d8d353d 100644
--- a/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java
+++ b/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java
@@ -54,20 +54,17 @@ public class DistanceFieldComparatorSource extends FieldComparatorSource {
@Override
public FieldComparator newComparator(String fieldname, int numHits,
int sortPos, boolean reversed) throws IOException {
- dsdlc = new DistanceScoreDocLookupComparator(distanceFilter, numHits);
+ dsdlc = new DistanceScoreDocLookupComparator(numHits);
return dsdlc;
}
private class DistanceScoreDocLookupComparator extends FieldComparator {
- private DistanceFilter distanceFilter;
private double[] values;
private double bottom;
private int offset =0;
- public DistanceScoreDocLookupComparator(DistanceFilter distanceFilter,
- int numHits) {
- this.distanceFilter = distanceFilter;
+ public DistanceScoreDocLookupComparator(int numHits) {
values = new double[numHits];
return;
}
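The DistanceFieldComparatorSource change works because a non-static inner class already has access to the enclosing instance's fields: the inner comparator's private distanceFilter copy and its constructor parameter merely duplicated outer state, which Eclipse flags. A minimal illustration (names invented):

    public class Outer {
      private String shared = "outer state";

      // Non-static inner class: reads Outer's fields directly, so no
      // duplicate field or constructor parameter is needed.
      private class Inner {
        String describe() {
          return shared;  // resolves to Outer.this.shared
        }
      }

      public static void main(String[] args) {
        System.out.println(new Outer().new Inner().describe());
      }
    }
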
diff --git a/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceHandler.java b/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceHandler.java
index 7c1deef8db6..3c52d204058 100644
--- a/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceHandler.java
+++ b/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceHandler.java
@@ -31,7 +31,7 @@ import java.util.Map;
*/
public class DistanceHandler {
- public enum Precision {EXACT, TWOFEET, TWENTYFEET, TWOHUNDREDFEET};
+ public enum Precision {EXACT, TWOFEET, TWENTYFEET, TWOHUNDREDFEET}
private Map distances;
private Map distanceLookupCache;
diff --git a/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java b/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
index 455c021f315..6b25fcb4b75 100644
--- a/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
+++ b/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
@@ -21,8 +21,6 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -49,18 +47,11 @@ import org.apache.lucene.spatial.tier.projections.IProjector;
import org.apache.lucene.spatial.tier.projections.SinusoidalProjector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.Version;
-/**
- *
- */
-public class TestCartesian extends TestCase{
+public class TestCartesian extends LuceneTestCase {
- /**
- * @param args
- */
-
private Directory directory;
private IndexSearcher searcher;
// reston va
@@ -76,10 +67,11 @@ public class TestCartesian extends TestCase{
@Override
- protected void setUp() throws IOException {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
setUpPlotter( 2, 15);
@@ -495,8 +487,8 @@ public class TestCartesian extends TestCase{
// As the radius filter has performed the distance calculations
// already, pass in the filter to reuse the results.
//
- DistanceFieldComparatorSource dsort = new DistanceFieldComparatorSource(dq.distanceFilter);
- Sort sort = new Sort(new SortField("foo", dsort));
+ //DistanceFieldComparatorSource dsort = new DistanceFieldComparatorSource(dq.distanceFilter);
+ //Sort sort = new Sort(new SortField("foo", dsort));
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
diff --git a/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java b/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java
index 77dc3ec1f56..f31d0635d0f 100644
--- a/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java
+++ b/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java
@@ -18,8 +18,6 @@ package org.apache.lucene.spatial.tier;
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -28,16 +26,11 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.RAMDirectory;
-
-/**
- *
- */
-public class TestDistance extends TestCase{
-
+public class TestDistance extends LuceneTestCase {
private RAMDirectory directory;
// reston va
@@ -48,16 +41,18 @@ public class TestDistance extends TestCase{
private IndexWriter writer;
@Override
- protected void setUp() throws IOException {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
addData(writer);
}
@Override
- protected void tearDown() throws IOException {
+ protected void tearDown() throws Exception {
writer.close();
+ super.tearDown();
}
private void addPoint(IndexWriter writer, String name, double lat, double lng) throws IOException{
diff --git a/contrib/spellchecker/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java b/contrib/spellchecker/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java
index 5833199e62a..38181732470 100644
--- a/contrib/spellchecker/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java
+++ b/contrib/spellchecker/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java
@@ -82,7 +82,7 @@ public class JaroWinklerDistance implements StringDistance {
public float getDistance(String s1, String s2) {
int[] mtp = matches(s1, s2);
- float m = (float) mtp[0];
+ float m = mtp[0];
if (m == 0) {
return 0f;
}
diff --git a/contrib/spellchecker/src/java/org/apache/lucene/search/spell/NGramDistance.java b/contrib/spellchecker/src/java/org/apache/lucene/search/spell/NGramDistance.java
index d2cb340e311..b6d8c5efb34 100644
--- a/contrib/spellchecker/src/java/org/apache/lucene/search/spell/NGramDistance.java
+++ b/contrib/spellchecker/src/java/org/apache/lucene/search/spell/NGramDistance.java
@@ -138,7 +138,7 @@ public class NGramDistance implements StringDistance {
// our last action in the above loop was to switch d and p, so p now
// actually has the most recent cost counts
- return 1.0f - ((float) p[sl] / Math.max(tl, sl));
+ return 1.0f - (p[sl] / Math.max(tl, sl));
}
}
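Both string-distance tweaks above lean on Java's numeric promotion rules: in JaroWinklerDistance mtp[0] is an int that widens to float on assignment, and in NGramDistance the cost array is already a float[], so dividing a float by an int yields a float with no cast. For example:

    public class PromotionDemo {
      public static void main(String[] args) {
        int[] mtp = { 3 };
        float m = mtp[0];            // int widens to float; no cast needed

        float[] p = { 2.0f };
        int tl = 4, sl = 0;
        float d = 1.0f - (p[sl] / Math.max(tl, sl));  // float / int -> float
        System.out.println(m + " " + d);
      }
    }
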
diff --git a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
index 50e1dd22a5a..cf9902d6f9e 100644
--- a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
+++ b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
@@ -20,8 +20,6 @@ package org.apache.lucene.search.spell;
import java.io.IOException;
import java.util.Iterator;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -29,27 +27,26 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Test case for LuceneDictionary.
* It first creates a simple index and then a couple of instances of LuceneDictionary
* on different fields and checks if all the right text comes back.
- *
*/
-public class TestLuceneDictionary extends TestCase {
+public class TestLuceneDictionary extends LuceneTestCase {
private Directory store = new RAMDirectory();
private IndexReader indexReader = null;
private LuceneDictionary ld;
- private Iterator it;
+ private Iterator<String> it;
@Override
- public void setUp() throws Exception {
-
- IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ protected void setUp() throws Exception {
+ super.setUp();
+ IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(LuceneTestCase.TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc;
diff --git a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
index 60fc184ee64..fd4d1ecd071 100755
--- a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
+++ b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
@@ -38,13 +38,9 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
/**
* Spell checker test case
- *
- *
*/
public class TestSpellChecker extends LuceneTestCase {
private SpellCheckerMock spellChecker;
@@ -58,7 +54,7 @@ public class TestSpellChecker extends LuceneTestCase {
//create a user index
userindex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(userindex, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(userindex, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
@@ -335,15 +331,16 @@ public class TestSpellChecker extends LuceneTestCase {
assertEquals(0, searcher.getIndexReader().getRefCount());
}
}
-
- private void showSearchersOpen() {
- int count = 0;
- for (IndexSearcher searcher : searchers) {
- if(searcher.getIndexReader().getRefCount() > 0)
- ++count;
- }
- System.out.println(count);
- }
+
+ // For debug
+// private void showSearchersOpen() {
+// int count = 0;
+// for (IndexSearcher searcher : searchers) {
+// if(searcher.getIndexReader().getRefCount() > 0)
+// ++count;
+// }
+// System.out.println(count);
+// }
private class SpellCheckWorker implements Runnable {
diff --git a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/DistanceQuery.java b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/DistanceQuery.java
index d8034ce5884..9e40f00ddb5 100644
--- a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/DistanceQuery.java
+++ b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/DistanceQuery.java
@@ -46,7 +46,7 @@ public class DistanceQuery extends ComposedQuery implements DistanceSubQuery {
public boolean subQueriesOrdered() {return ordered;}
public String distanceSubQueryNotAllowed() {
- Iterator sqi = getSubQueriesIterator();
+ Iterator<?> sqi = getSubQueriesIterator();
while (sqi.hasNext()) {
Object leq = sqi.next();
if (leq instanceof DistanceSubQuery) {
@@ -94,7 +94,7 @@ public class DistanceQuery extends ComposedQuery implements DistanceSubQuery {
float boost,
BasicQueryFactory qf) throws IOException {
SpanQuery[] spanNearClauses = new SpanQuery[getNrSubQueries()];
- Iterator sqi = getSubQueriesIterator();
+ Iterator<?> sqi = getSubQueriesIterator();
int qi = 0;
while (sqi.hasNext()) {
SpanNearClauseFactory sncf = new SpanNearClauseFactory(reader, fieldName, qf);
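DistanceQuery's iterators become Iterator<?> rather than a concrete element type: the surrounding code immediately tests each element with instanceof, so the wildcard records that nothing is assumed about the element type while still eliminating the raw-type warning. A sketch of the idiom:

    import java.util.Arrays;
    import java.util.Iterator;

    public class WildcardIteratorDemo {
      public static void main(String[] args) {
        Iterator<?> it = Arrays.asList("a", Integer.valueOf(1)).iterator();
        while (it.hasNext()) {
          Object o = it.next();       // elements come out as Object
          if (o instanceof String) {  // narrowed explicitly, as in DistanceQuery
            System.out.println("string: " + o);
          }
        }
      }
    }
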
diff --git a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/FieldsQuery.java b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/FieldsQuery.java
index 9ef9ba65029..daea96d73e3 100644
--- a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/FieldsQuery.java
+++ b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/FieldsQuery.java
@@ -57,7 +57,6 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */
qc = (SrndQuery) q.clone();
queries.add( new FieldsQuery( qc, fni.next(), fieldOp));
}
- boolean infix = true;
OrQuery oq = new OrQuery(queries,
true /* infix OR for field names */,
OrOperatorName);
diff --git a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SimpleTerm.java b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SimpleTerm.java
index b2d685841c1..436aabafc5c 100644
--- a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SimpleTerm.java
+++ b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SimpleTerm.java
@@ -44,7 +44,7 @@ public abstract class SimpleTerm
return this.toStringUnquoted().compareTo( ost.toStringUnquoted());
}
- protected void suffixToString(StringBuilder r) {;} /* override for prefix query */
+ protected void suffixToString(StringBuilder r) {} /* override for prefix query */
@Override
public String toString() {
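The SimpleTerm fix removes a stray semicolon after the method body: {;} contains an empty statement, which Eclipse reports. The same cleanup recurs in AnalyzerUtil and in the demo's IndexFiles later in this patch. For instance:

    public class EmptyStatementDemo {
      void noOp() {}    // fine: an empty body
      void noisy() {;}  // same behavior, but the ';' is an empty-statement warning

      public static void main(String[] args) {
        new EmptyStatementDemo().noOp();
      }
    }
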
diff --git a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SpanNearClauseFactory.java b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SpanNearClauseFactory.java
index d0566f33a5f..045974c0515 100644
--- a/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SpanNearClauseFactory.java
+++ b/contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SpanNearClauseFactory.java
@@ -52,23 +52,17 @@ Operations:
- SpanNotQuery: treat similar to subquery SpanNearQuery. (ok?)
*/
+import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
-import java.util.Comparator;
-import java.util.Arrays;
-
-import java.io.IOException;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
-
import org.apache.lucene.search.Query;
-
-import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
diff --git a/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java b/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java
index f6324195e04..1396f5c6c80 100644
--- a/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java
+++ b/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java
@@ -29,6 +29,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.queryParser.surround.parser.QueryParser;
import junit.framework.TestCase;
+import junit.framework.Assert;
public class BooleanQueryTst {
String queryText;
@@ -87,8 +88,8 @@ public class BooleanQueryTst {
float score = scorer.score();
docNr += docBase;
/* System.out.println(docNr + " '" + dBase.getDocs()[docNr] + "': " + score); */
- TestCase.assertTrue(queryText + ": positive score", score > 0.0);
- TestCase.assertTrue(queryText + ": too many hits", totalMatched < expectedDocNrs.length);
+ Assert.assertTrue(queryText + ": positive score", score > 0.0);
+ Assert.assertTrue(queryText + ": too many hits", totalMatched < expectedDocNrs.length);
int i;
for (i = 0; i < expectedDocNrs.length; i++) {
if ((! encountered[i]) && (expectedDocNrs[i] == docNr)) {
@@ -97,13 +98,13 @@ public class BooleanQueryTst {
}
}
if (i == expectedDocNrs.length) {
- TestCase.assertTrue(queryText + ": doc nr for hit not expected: " + docNr, false);
+ Assert.assertTrue(queryText + ": doc nr for hit not expected: " + docNr, false);
}
totalMatched++;
}
void checkNrHits() {
- TestCase.assertEquals(queryText + ": nr of hits", expectedDocNrs.length, totalMatched);
+ Assert.assertEquals(queryText + ": nr of hits", expectedDocNrs.length, totalMatched);
}
}
diff --git a/contrib/swing/src/test/org/apache/lucene/swing/models/BaseListModel.java b/contrib/swing/src/test/org/apache/lucene/swing/models/BaseListModel.java
index 6eb16ff9f42..e2b40f3d3e4 100644
--- a/contrib/swing/src/test/org/apache/lucene/swing/models/BaseListModel.java
+++ b/contrib/swing/src/test/org/apache/lucene/swing/models/BaseListModel.java
@@ -24,9 +24,9 @@ import javax.swing.AbstractListModel;
public class BaseListModel extends AbstractListModel {
- private List data = new ArrayList();
+ private List<Object> data = new ArrayList<Object>();
- public BaseListModel(Iterator iterator) {
+ public BaseListModel(Iterator<?> iterator) {
while (iterator.hasNext()) {
data.add(iterator.next());
}
diff --git a/contrib/swing/src/test/org/apache/lucene/swing/models/BaseTableModel.java b/contrib/swing/src/test/org/apache/lucene/swing/models/BaseTableModel.java
index 1093bfda169..cd8f4929458 100644
--- a/contrib/swing/src/test/org/apache/lucene/swing/models/BaseTableModel.java
+++ b/contrib/swing/src/test/org/apache/lucene/swing/models/BaseTableModel.java
@@ -24,10 +24,10 @@ import javax.swing.table.AbstractTableModel;
public class BaseTableModel extends AbstractTableModel {
- private List columnNames = new ArrayList();
- private List rows = new ArrayList();
+ private List<String> columnNames = new ArrayList<String>();
+ private List<Object> rows = new ArrayList<Object>();
- public BaseTableModel(Iterator data) {
+ public BaseTableModel(Iterator<?> data) {
columnNames.add("Name");
columnNames.add("Type");
columnNames.add("Phone");
@@ -37,7 +37,7 @@ public class BaseTableModel extends AbstractTableModel {
columnNames.add("Zip");
while (data.hasNext()) {
- Object nextRow = (Object) data.next();
+ Object nextRow = data.next();
rows.add(nextRow);
}
}
@@ -66,7 +66,7 @@ public class BaseTableModel extends AbstractTableModel {
}
@Override
- public Class getColumnClass(int columnIndex) {
+ public Class<?> getColumnClass(int columnIndex) {
return String.class;
}
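getColumnClass is declared as returning Class<?> in the generified Swing TableModel interface, so the raw Class return type triggers a warning; String.class is a Class<String>, which is assignable to Class<?>. A compilable sketch:

    import javax.swing.table.AbstractTableModel;

    public class OneColumnModel extends AbstractTableModel {
      public int getRowCount() { return 0; }
      public int getColumnCount() { return 1; }
      public Object getValueAt(int row, int col) { return ""; }

      @Override
      public Class<?> getColumnClass(int columnIndex) {
        return String.class;  // Class<String> is assignable to Class<?>
      }

      public static void main(String[] args) {
        System.out.println(new OneColumnModel().getColumnClass(0));
      }
    }
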
diff --git a/contrib/swing/src/test/org/apache/lucene/swing/models/DataStore.java b/contrib/swing/src/test/org/apache/lucene/swing/models/DataStore.java
index 8c19db7f1c4..5313e0cc90e 100644
--- a/contrib/swing/src/test/org/apache/lucene/swing/models/DataStore.java
+++ b/contrib/swing/src/test/org/apache/lucene/swing/models/DataStore.java
@@ -30,7 +30,7 @@ public class DataStore {
private static final String STEAK_CATEGORY = "Steak";
private static int id = 0;
- static Collection restaurants = new ArrayList();
+ static Collection<RestaurantInfo> restaurants = new ArrayList<RestaurantInfo>();
static RestaurantInfo pinos = new RestaurantInfo();
static RestaurantInfo canolis = new RestaurantInfo();
static RestaurantInfo picadillo = new RestaurantInfo();
@@ -47,7 +47,7 @@ public class DataStore {
static RestaurantInfo outback4 = new RestaurantInfo();
- public static Iterator getRestaurants(){
+ public static Iterator<RestaurantInfo> getRestaurants(){
return restaurants.iterator();
}
diff --git a/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicList.java b/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicList.java
index 107b58cafdb..dafe8e898e7 100644
--- a/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicList.java
+++ b/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicList.java
@@ -28,11 +28,11 @@ import junit.framework.TestCase;
public class TestBasicList extends TestCase {
private ListModel baseListModel;
private ListSearcher listSearcher;
- private List list;
+ private List<RestaurantInfo> list;
@Override
protected void setUp() throws Exception {
- list = new ArrayList();
+ list = new ArrayList<RestaurantInfo>();
list.add(DataStore.canolis);
list.add(DataStore.chris);
diff --git a/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicTable.java b/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicTable.java
index 260ccfa54d1..b346fa835ec 100644
--- a/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicTable.java
+++ b/contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicTable.java
@@ -27,11 +27,11 @@ import junit.framework.TestCase;
public class TestBasicTable extends TestCase {
private TableModel baseTableModel;
private TableSearcher tableSearcher;
- private List list;
+ private List<RestaurantInfo> list;
@Override
protected void setUp() throws Exception {
- list = new ArrayList();
+ list = new ArrayList<RestaurantInfo>();
list.add(DataStore.canolis);
list.add(DataStore.chris);
diff --git a/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java b/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java
index 7c1760bac77..9ac905086ca 100644
--- a/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java
+++ b/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java
@@ -17,7 +17,6 @@
package org.apache.lucene.wikipedia.analysis;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
diff --git a/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java b/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java
index 38d4815259e..77f7fd47563 100644
--- a/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java
+++ b/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java
@@ -67,7 +67,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
"'''same [[Category:foo]] goes for this '''''and2 [[Category:foo]] and this" +
" [http://foo.boo.com/test/test/ Test Test] [http://foo.boo.com/test/test/test.html Test Test]" +
" [http://foo.boo.com/test/test/test.html?g=b&c=d Test Test] [Citation] martian code ";
- Map tcm = new HashMap();//map tokens to types
+ Map<String,String> tcm = new HashMap<String,String>();//map tokens to types
tcm.put("link", WikipediaTokenizer.INTERNAL_LINK);
tcm.put("display", WikipediaTokenizer.INTERNAL_LINK);
tcm.put("info", WikipediaTokenizer.INTERNAL_LINK);
@@ -144,7 +144,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
while (tf.incrementToken()) {
String tokText = termAtt.term();
//System.out.println("Text: " + tokText + " Type: " + token.type());
- String expectedType = (String) tcm.get(tokText);
+ String expectedType = tcm.get(tokText);
assertTrue("expectedType is null and it shouldn't be for: " + tf.toString(), expectedType != null);
assertTrue(typeAtt.type() + " is not equal to " + expectedType + " for " + tf.toString(), typeAtt.type().equals(expectedType) == true);
count++;
@@ -264,7 +264,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
}
public void testLucene1133() throws Exception {
- Set untoks = new HashSet();
+ Set<String> untoks = new HashSet<String>();
untoks.add(WikipediaTokenizer.CATEGORY);
untoks.add(WikipediaTokenizer.ITALICS);
//should be exactly the same, regardless of untoks
@@ -342,7 +342,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
}
public void testBoth() throws Exception {
- Set untoks = new HashSet();
+ Set<String> untoks = new HashSet<String>();
untoks.add(WikipediaTokenizer.CATEGORY);
untoks.add(WikipediaTokenizer.ITALICS);
String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h i j]]";
diff --git a/contrib/wordnet/src/java/org/apache/lucene/wordnet/AnalyzerUtil.java b/contrib/wordnet/src/java/org/apache/lucene/wordnet/AnalyzerUtil.java
index 5d598b7c0d7..256a4720218 100644
--- a/contrib/wordnet/src/java/org/apache/lucene/wordnet/AnalyzerUtil.java
+++ b/contrib/wordnet/src/java/org/apache/lucene/wordnet/AnalyzerUtil.java
@@ -46,7 +46,7 @@ import org.apache.lucene.util.AttributeSource;
*/
public class AnalyzerUtil {
- private AnalyzerUtil() {};
+ private AnalyzerUtil() {}
/**
* Returns a simple analyzer wrapper that logs all tokens produced by the
@@ -367,7 +367,7 @@ public class AnalyzerUtil {
public void setValue(int value) { this.value = value; }
@Override
public String toString() { return String.valueOf(value); }
- };
+ }
@@ -400,7 +400,7 @@ public class AnalyzerUtil {
// TODO: don't split on floating point numbers, e.g. 3.1415 (digit before or after '.')
/** Divides text into sentences; Includes inverted spanish exclamation and question mark */
- private static final Pattern SENTENCES = Pattern.compile("[!\\.\\?\\xA1\\xBF]+");
+// private static final Pattern SENTENCES = Pattern.compile("[!\\.\\?\\xA1\\xBF]+");
/**
* Returns at most the first N sentences of the given text. Delimiting
diff --git a/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java b/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
index da7a3a43241..d0b1f46f825 100755
--- a/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
+++ b/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
@@ -131,7 +131,7 @@ public final class SynExpand {
while ( it.hasNext())
{
// [2a] add to level words in
- String word = (String) it.next();
+ String word = it.next();
TermQuery tq = new TermQuery( new Term( field, word));
tmp.add( tq, BooleanClause.Occur.SHOULD);
diff --git a/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java b/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java
index a498712870c..455c8118c5a 100644
--- a/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java
+++ b/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java
@@ -68,10 +68,12 @@ import java.util.TreeSet;
* xxxx:[]
*
*
- * @see prologdb man page
- * @see Dave's synonym demo site
+ * See also:
+ * prologdb man page
+ * Dave's synonym demo site
*/
public class SynonymMap {
@@ -389,7 +391,7 @@ public class SynonymMap {
System.arraycopy(output, 0, buffer, 0, len);
return buffer;
} finally {
- if (input != null) input.close();
+ input.close();
}
}
diff --git a/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestSynonymTokenFilter.java b/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestSynonymTokenFilter.java
index 734ba851dc8..9e53cc307a8 100644
--- a/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestSynonymTokenFilter.java
+++ b/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestSynonymTokenFilter.java
@@ -21,7 +21,6 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.Reader;
-import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
@@ -29,7 +28,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSynonymTokenFilter extends BaseTokenStreamTestCase {
File dataDir = new File(System.getProperty("dataDir", "./bin"));
@@ -96,8 +94,8 @@ public class TestSynonymTokenFilter extends BaseTokenStreamTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream ts = new WhitespaceTokenizer(reader);
- ts = new LowerCaseFilter(Version.LUCENE_CURRENT, ts);
+ TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+ ts = new LowerCaseFilter(TEST_VERSION_CURRENT, ts);
ts = new SynonymTokenFilter(ts, synonyms, maxSynonyms);
return ts;
}
@@ -105,7 +103,7 @@ public class TestSynonymTokenFilter extends BaseTokenStreamTestCase {
private class SavedStreams {
Tokenizer source;
TokenStream result;
- };
+ }
@Override
public TokenStream reusableTokenStream(String fieldName, Reader reader)
@@ -113,8 +111,8 @@ public class TestSynonymTokenFilter extends BaseTokenStreamTestCase {
SavedStreams streams = (SavedStreams) getPreviousTokenStream();
if (streams == null) {
streams = new SavedStreams();
- streams.source = new WhitespaceTokenizer(reader);
- streams.result = new LowerCaseFilter(Version.LUCENE_CURRENT, streams.source);
+ streams.source = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+ streams.result = new LowerCaseFilter(TEST_VERSION_CURRENT, streams.source);
streams.result = new SynonymTokenFilter(streams.result, synonyms, maxSynonyms);
setPreviousTokenStream(streams);
} else {
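The TestSynonymTokenFilter hunks thread TEST_VERSION_CURRENT through the SavedStreams idiom used across Lucene 3.x analyzers: the tokenizer/filter chain is built once, cached per thread via setPreviousTokenStream, and reset on reuse. A trimmed sketch under those assumptions (synonym specifics omitted; Version.LUCENE_CURRENT is the deprecated alias, since TEST_VERSION_CURRENT is test-only):

    import java.io.IOException;
    import java.io.Reader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.LowerCaseFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.util.Version;

    public class ReusableAnalyzer extends Analyzer {
      private static class SavedStreams {
        Tokenizer source;
        TokenStream result;
      }

      @Override
      public TokenStream tokenStream(String fieldName, Reader reader) {
        TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
        return new LowerCaseFilter(Version.LUCENE_CURRENT, ts);
      }

      @Override
      public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
        SavedStreams streams = (SavedStreams) getPreviousTokenStream();
        if (streams == null) {
          streams = new SavedStreams();
          streams.source = new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
          streams.result = new LowerCaseFilter(Version.LUCENE_CURRENT, streams.source);
          setPreviousTokenStream(streams);
        } else {
          streams.source.reset(reader);  // reuse the cached chain on later calls
        }
        return streams.result;
      }
    }
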
diff --git a/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryTemplateManager.java b/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryTemplateManager.java
index 9fe92bf95f5..575188b2b70 100644
--- a/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryTemplateManager.java
+++ b/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryTemplateManager.java
@@ -11,7 +11,6 @@ import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Result;
-import javax.xml.transform.Source;
import javax.xml.transform.Templates;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
diff --git a/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingTermBuilder.java b/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingTermBuilder.java
index dfd93048d12..c565022a66b 100644
--- a/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingTermBuilder.java
+++ b/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingTermBuilder.java
@@ -2,7 +2,6 @@ package org.apache.lucene.xmlparser.builders;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.payloads.PayloadTermQuery;
import org.apache.lucene.search.payloads.AveragePayloadFunction;
import org.apache.lucene.xmlparser.DOMUtils;
diff --git a/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/DuplicateFilterBuilder.java b/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/DuplicateFilterBuilder.java
index 18a9676f5ef..4ca6d8b6121 100644
--- a/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/DuplicateFilterBuilder.java
+++ b/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/DuplicateFilterBuilder.java
@@ -3,17 +3,12 @@
*/
package org.apache.lucene.xmlparser.builders;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanFilter;
import org.apache.lucene.search.DuplicateFilter;
import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.FilterClause;
import org.apache.lucene.xmlparser.DOMUtils;
import org.apache.lucene.xmlparser.FilterBuilder;
import org.apache.lucene.xmlparser.ParserException;
import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
diff --git a/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java b/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
index 969713b0bec..b23b729f997 100644
--- a/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
+++ b/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
@@ -7,8 +7,6 @@ import java.util.StringTokenizer;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Field;
@@ -16,6 +14,7 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
@@ -39,10 +38,10 @@ import org.xml.sax.SAXException;
* This class illustrates how form input (such as from a web page or Swing gui) can be
* turned into Lucene queries using a choice of XSL templates for different styles of queries.
*/
-public class TestQueryTemplateManager extends TestCase {
+public class TestQueryTemplateManager extends LuceneTestCase {
CoreParser builder;
- Analyzer analyzer=new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer=new StandardAnalyzer(TEST_VERSION_CURRENT);
private IndexSearcher searcher;
//A collection of documents' field values for use in our tests
@@ -160,5 +159,6 @@ public class TestQueryTemplateManager extends TestCase {
@Override
protected void tearDown() throws Exception {
searcher.close();
+ super.tearDown();
}
}
diff --git a/src/demo/org/apache/lucene/demo/IndexFiles.java b/src/demo/org/apache/lucene/demo/IndexFiles.java
index 6b175416836..def29566986 100644
--- a/src/demo/org/apache/lucene/demo/IndexFiles.java
+++ b/src/demo/org/apache/lucene/demo/IndexFiles.java
@@ -91,7 +91,6 @@ public class IndexFiles {
// at least on windows, some temporary files raise this exception with an "access denied" message
// checking if the file can be read doesn't help
catch (FileNotFoundException fnfe) {
- ;
}
}
}
diff --git a/src/demo/org/apache/lucene/demo/html/Entities.java b/src/demo/org/apache/lucene/demo/html/Entities.java
index d6828a9384d..5c5a4f23ed8 100644
--- a/src/demo/org/apache/lucene/demo/html/Entities.java
+++ b/src/demo/org/apache/lucene/demo/html/Entities.java
@@ -38,7 +38,7 @@ public class Entities {
new Character((char)Integer.parseInt(entity.substring(start), radix));
return c.toString();
} else {
- String s = (String)decoder.get(entity);
+ String s = decoder.get(entity);
if (s != null)
return s;
else return "";
@@ -49,17 +49,15 @@ public class Entities {
int length = s.length();
StringBuffer buffer = new StringBuffer(length * 2);
for (int i = 0; i < length; i++) {
- char c = s.charAt(i);
- int j = (int)c;
+ int j = s.charAt(i);
if (j < 0x100 && encoder[j] != null) {
buffer.append(encoder[j]); // have a named encoding
buffer.append(';');
} else if (j < 0x80) {
- buffer.append(c); // use ASCII value
+ buffer.append((char) j); // use ASCII value
} else {
buffer.append(""); // use numeric encoding
- buffer.append((int)c);
- buffer.append(';');
+ buffer.append(j).append(';');
}
}
return buffer.toString();
diff --git a/src/java/org/apache/lucene/analysis/CharArrayMap.java b/src/java/org/apache/lucene/analysis/CharArrayMap.java
index 5c1c72e9ccb..e9d929b7b4d 100644
--- a/src/java/org/apache/lucene/analysis/CharArrayMap.java
+++ b/src/java/org/apache/lucene/analysis/CharArrayMap.java
@@ -488,7 +488,7 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
@Override
public String toString() {
return new StringBuilder().append(keys[pos]).append('=')
- .append(((Object) values[pos] == (Object) CharArrayMap.this) ? "(this Map)" : values[pos])
+ .append((values[pos] == CharArrayMap.this) ? "(this Map)" : values[pos])
.toString();
}
}
diff --git a/src/java/org/apache/lucene/analysis/CharArraySet.java b/src/java/org/apache/lucene/analysis/CharArraySet.java
index fda8531fd9e..a86811a55df 100644
--- a/src/java/org/apache/lucene/analysis/CharArraySet.java
+++ b/src/java/org/apache/lucene/analysis/CharArraySet.java
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis;
* limitations under the License.
*/
-import java.util.Arrays;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
diff --git a/src/java/org/apache/lucene/analysis/NumericTokenStream.java b/src/java/org/apache/lucene/analysis/NumericTokenStream.java
index 20781247f32..1d5830ff638 100644
--- a/src/java/org/apache/lucene/analysis/NumericTokenStream.java
+++ b/src/java/org/apache/lucene/analysis/NumericTokenStream.java
@@ -22,8 +22,6 @@ import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.document.NumericField; // for javadocs
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import org.apache.lucene.search.NumericRangeFilter; // for javadocs
-import org.apache.lucene.search.SortField; // for javadocs
-import org.apache.lucene.search.FieldCache; // javadocs
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -162,7 +160,7 @@ public final class NumericTokenStream extends TokenStream {
* new Field(name, new NumericTokenStream(precisionStep).setIntValue(value))
*/
public NumericTokenStream setIntValue(final int value) {
- this.value = (long) value;
+ this.value = value;
valSize = 32;
shift = 0;
return this;
@@ -188,7 +186,7 @@ public final class NumericTokenStream extends TokenStream {
* new Field(name, new NumericTokenStream(precisionStep).setFloatValue(value))
*/
public NumericTokenStream setFloatValue(final float value) {
- this.value = (long) NumericUtils.floatToSortableInt(value);
+ this.value = NumericUtils.floatToSortableInt(value);
valSize = 32;
shift = 0;
return this;
diff --git a/src/java/org/apache/lucene/analysis/StopAnalyzer.java b/src/java/org/apache/lucene/analysis/StopAnalyzer.java
index be03e8160a1..387281a9104 100644
--- a/src/java/org/apache/lucene/analysis/StopAnalyzer.java
+++ b/src/java/org/apache/lucene/analysis/StopAnalyzer.java
@@ -24,7 +24,6 @@ import java.util.Arrays;
import java.util.Set;
import java.util.List;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.util.Version;
/** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}.
@@ -91,9 +90,12 @@ public final class StopAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a {@link LowerCaseTokenizer} filtered with
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link LowerCaseTokenizer} filtered with
* {@link StopFilter}
*/
@Override
diff --git a/src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java b/src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java
index 616fce39d39..c83ea2e1915 100644
--- a/src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java
+++ b/src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java
@@ -124,7 +124,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
* This instance is exhausted after this, but all sinks are instant available.
*/
public void consumeAllTokens() throws IOException {
- while (incrementToken());
+ while (incrementToken()) {}
}
@Override
diff --git a/src/java/org/apache/lucene/analysis/Token.java b/src/java/org/apache/lucene/analysis/Token.java
index bf2d07ebeba..5ccf5e289ee 100644
--- a/src/java/org/apache/lucene/analysis/Token.java
+++ b/src/java/org/apache/lucene/analysis/Token.java
@@ -525,7 +525,7 @@ public class Token extends AttributeImpl
Token t = (Token)super.clone();
// Do a deep clone
if (termBuffer != null) {
- t.termBuffer = (char[]) termBuffer.clone();
+ t.termBuffer = termBuffer.clone();
}
if (payload != null) {
t.payload = (Payload) payload.clone();
diff --git a/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java b/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
index 44384f12ef0..d9058c4dac3 100644
--- a/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
+++ b/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
@@ -47,7 +47,7 @@ public class StandardAnalyzer extends Analyzer {
/**
* Specifies whether deprecated acronyms should be replaced with HOST type.
- * See {@linkplain https://issues.apache.org/jira/browse/LUCENE-1068}
+ * See {@linkplain "https://issues.apache.org/jira/browse/LUCENE-1068"}
*/
private final boolean replaceInvalidAcronym;
diff --git a/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java b/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
index 384e7870f71..9a02cdf6c4e 100644
--- a/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
+++ b/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.standard;
import java.io.IOException;
import java.io.Reader;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
diff --git a/src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java b/src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java
index ec24be772bc..11edcd2be27 100644
--- a/src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java
+++ b/src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java
@@ -183,7 +183,7 @@ public class TermAttributeImpl extends AttributeImpl implements TermAttribute, C
TermAttributeImpl t = (TermAttributeImpl)super.clone();
// Do a deep clone
if (termBuffer != null) {
- t.termBuffer = (char[]) termBuffer.clone();
+ t.termBuffer = termBuffer.clone();
}
return t;
}
diff --git a/src/java/org/apache/lucene/document/AbstractField.java b/src/java/org/apache/lucene/document/AbstractField.java
index a8249bc226f..8f025aca33c 100755
--- a/src/java/org/apache/lucene/document/AbstractField.java
+++ b/src/java/org/apache/lucene/document/AbstractField.java
@@ -18,6 +18,7 @@ package org.apache.lucene.document;
import org.apache.lucene.search.PhraseQuery; // for javadocs
import org.apache.lucene.search.spans.SpanQuery; // for javadocs
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.util.StringHelper; // for javadocs
@@ -85,7 +86,7 @@ public abstract class AbstractField implements Fieldable {
* the range of that encoding.
*
* @see org.apache.lucene.document.Document#setBoost(float)
- * @see org.apache.lucene.search.Similarity#computeNorm(String, org.apache.lucene.index.FieldInvertState)
+ * @see org.apache.lucene.search.Similarity#computeNorm(String, FieldInvertState)
* @see org.apache.lucene.search.Similarity#encodeNormValue(float)
*/
public void setBoost(float boost) {
diff --git a/src/java/org/apache/lucene/document/NumericField.java b/src/java/org/apache/lucene/document/NumericField.java
index a0c4e64d303..b1ccf52c8bc 100644
--- a/src/java/org/apache/lucene/document/NumericField.java
+++ b/src/java/org/apache/lucene/document/NumericField.java
@@ -140,7 +140,7 @@ import org.apache.lucene.search.FieldCache; // javadocs
*/
public final class NumericField extends AbstractField {
- private final NumericTokenStream tokenStream;
+ private final NumericTokenStream numericTS;
/**
* Creates a field for numeric values using the default precisionStep
@@ -195,12 +195,12 @@ public final class NumericField extends AbstractField {
public NumericField(String name, int precisionStep, Field.Store store, boolean index) {
super(name, store, index ? Field.Index.ANALYZED_NO_NORMS : Field.Index.NO, Field.TermVector.NO);
setOmitTermFreqAndPositions(true);
- tokenStream = new NumericTokenStream(precisionStep);
+ numericTS = new NumericTokenStream(precisionStep);
}
/** Returns a {@link NumericTokenStream} for indexing the numeric value. */
public TokenStream tokenStreamValue() {
- return isIndexed() ? tokenStream : null;
+ return isIndexed() ? numericTS : null;
}
/** Returns always <code>null</code> for numeric fields */
@@ -231,7 +231,7 @@ public final class NumericField extends AbstractField {
* document.add(new NumericField(name, precisionStep).setLongValue(value))
*/
public NumericField setLongValue(final long value) {
- tokenStream.setLongValue(value);
+ numericTS.setLongValue(value);
fieldsData = Long.valueOf(value);
return this;
}
@@ -243,7 +243,7 @@ public final class NumericField extends AbstractField {
* document.add(new NumericField(name, precisionStep).setIntValue(value))
*/
public NumericField setIntValue(final int value) {
- tokenStream.setIntValue(value);
+ numericTS.setIntValue(value);
fieldsData = Integer.valueOf(value);
return this;
}
@@ -255,7 +255,7 @@ public final class NumericField extends AbstractField {
* document.add(new NumericField(name, precisionStep).setDoubleValue(value))
*/
public NumericField setDoubleValue(final double value) {
- tokenStream.setDoubleValue(value);
+ numericTS.setDoubleValue(value);
fieldsData = Double.valueOf(value);
return this;
}
@@ -267,7 +267,7 @@ public final class NumericField extends AbstractField {
* document.add(new NumericField(name, precisionStep).setFloatValue(value))
*/
public NumericField setFloatValue(final float value) {
- tokenStream.setFloatValue(value);
+ numericTS.setFloatValue(value);
fieldsData = Float.valueOf(value);
return this;
}
diff --git a/src/java/org/apache/lucene/index/CheckIndex.java b/src/java/org/apache/lucene/index/CheckIndex.java
index e3b4d25f42c..13f95d6ea4d 100644
--- a/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/src/java/org/apache/lucene/index/CheckIndex.java
@@ -899,7 +899,7 @@ public class CheckIndex {
System.out.println("");
final int exitCode;
- if (result != null && result.clean == true)
+ if (result.clean == true)
exitCode = 0;
else
exitCode = 1;
diff --git a/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
index b12cc121fc1..37b73a9d31a 100644
--- a/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
+++ b/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
@@ -350,13 +350,13 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
protected class MergeThread extends Thread {
- IndexWriter writer;
+ IndexWriter tWriter;
MergePolicy.OneMerge startMerge;
MergePolicy.OneMerge runningMerge;
private volatile boolean done;
public MergeThread(IndexWriter writer, MergePolicy.OneMerge startMerge) throws IOException {
- this.writer = writer;
+ this.tWriter = writer;
this.startMerge = startMerge;
}
@@ -408,9 +408,9 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
// Subsequent times through the loop we do any new
// merge that writer says is necessary:
- merge = writer.getNextMerge();
+ merge = tWriter.getNextMerge();
if (merge != null) {
- writer.mergeInit(merge);
+ tWriter.mergeInit(merge);
updateMergeThreads();
if (verbose())
message(" merge thread: do another merge " + merge.segString(dir));
diff --git a/src/java/org/apache/lucene/index/DocFieldConsumers.java b/src/java/org/apache/lucene/index/DocFieldConsumers.java
index bccbf2c17dc..50a6ceac575 100644
--- a/src/java/org/apache/lucene/index/DocFieldConsumers.java
+++ b/src/java/org/apache/lucene/index/DocFieldConsumers.java
@@ -132,21 +132,21 @@ final class DocFieldConsumers extends DocFieldConsumer {
class PerDoc extends DocumentsWriter.DocWriter {
- DocumentsWriter.DocWriter one;
- DocumentsWriter.DocWriter two;
+ DocumentsWriter.DocWriter writerOne;
+ DocumentsWriter.DocWriter writerTwo;
@Override
public long sizeInBytes() {
- return one.sizeInBytes() + two.sizeInBytes();
+ return writerOne.sizeInBytes() + writerTwo.sizeInBytes();
}
@Override
public void finish() throws IOException {
try {
try {
- one.finish();
+ writerOne.finish();
} finally {
- two.finish();
+ writerTwo.finish();
}
} finally {
freePerDoc(this);
@@ -157,9 +157,9 @@ final class DocFieldConsumers extends DocFieldConsumer {
public void abort() {
try {
try {
- one.abort();
+ writerOne.abort();
} finally {
- two.abort();
+ writerTwo.abort();
}
} finally {
freePerDoc(this);
diff --git a/src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java b/src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java
index 34268f3cd3b..99d56ee725d 100644
--- a/src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java
+++ b/src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java
@@ -62,8 +62,8 @@ final class DocFieldConsumersPerThread extends DocFieldConsumerPerThread {
both.docID = docState.docID;
assert oneDoc.docID == docState.docID;
assert twoDoc.docID == docState.docID;
- both.one = oneDoc;
- both.two = twoDoc;
+ both.writerOne = oneDoc;
+ both.writerTwo = twoDoc;
return both;
}
}
diff --git a/src/java/org/apache/lucene/index/FieldsReader.java b/src/java/org/apache/lucene/index/FieldsReader.java
index 21cc15630e2..c9dfc9b829d 100644
--- a/src/java/org/apache/lucene/index/FieldsReader.java
+++ b/src/java/org/apache/lucene/index/FieldsReader.java
@@ -352,9 +352,9 @@ final class FieldsReader implements Cloneable {
final byte[] b = new byte[toRead];
fieldsStream.readBytes(b, 0, b.length);
if (compressed) {
- doc.add(new Field(fi.name, uncompress(b), Field.Store.YES));
+ doc.add(new Field(fi.name, uncompress(b)));
} else {
- doc.add(new Field(fi.name, b, Field.Store.YES));
+ doc.add(new Field(fi.name, b));
}
} else {
Field.Store store = Field.Store.YES;
@@ -400,7 +400,7 @@ final class FieldsReader implements Cloneable {
sizebytes[1] = (byte) (bytesize>>>16);
sizebytes[2] = (byte) (bytesize>>> 8);
sizebytes[3] = (byte) bytesize ;
- doc.add(new Field(fi.name, sizebytes, Field.Store.YES));
+ doc.add(new Field(fi.name, sizebytes));
return size;
}
diff --git a/src/java/org/apache/lucene/index/IndexReader.java b/src/java/org/apache/lucene/index/IndexReader.java
index 729c3eec8f1..ab9da4261b9 100644
--- a/src/java/org/apache/lucene/index/IndexReader.java
+++ b/src/java/org/apache/lucene/index/IndexReader.java
@@ -67,7 +67,7 @@ import java.util.concurrent.atomic.AtomicInteger;
<p><b>NOTE</b>: {@link
- <code>IndexReader</code>} instances are completely thread
+ IndexReader} instances are completely thread
safe, meaning multiple threads can call any of its methods,
concurrently. If your application requires external
synchronization, you should not synchronize on the
@@ -428,8 +428,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
* mutable state obeys "copy on write" semantics to ensure
* the changes are not seen by other readers.
*
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
*/
@Override
public synchronized Object clone() {
diff --git a/src/java/org/apache/lucene/index/IndexWriter.java b/src/java/org/apache/lucene/index/IndexWriter.java
index 6035405cf57..4a0fa8b3e8e 100644
--- a/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/src/java/org/apache/lucene/index/IndexWriter.java
@@ -138,7 +138,7 @@ import java.util.Map;
directly.
<p><b>NOTE</b>: {@link
- <code>IndexWriter</code>} instances are completely thread
+ IndexWriter} instances are completely thread
safe, meaning multiple threads can call any of its
methods, concurrently. If your application requires
external synchronization, you should not
diff --git a/src/java/org/apache/lucene/index/MultiReader.java b/src/java/org/apache/lucene/index/MultiReader.java
index 3933815b532..f29a7b3f864 100644
--- a/src/java/org/apache/lucene/index/MultiReader.java
+++ b/src/java/org/apache/lucene/index/MultiReader.java
@@ -48,7 +48,6 @@ public class MultiReader extends IndexReader implements Cloneable {
* left to the subreaders.
* Note that all subreaders are closed if this Multireader is closed.
* @param subReaders set of (sub)readers
- * @throws IOException
*/
public MultiReader(IndexReader... subReaders) {
initialize(subReaders, true);
@@ -61,7 +60,6 @@ public class MultiReader extends IndexReader implements Cloneable {
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
* @param subReaders set of (sub)readers
- * @throws IOException
*/
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
initialize(subReaders, closeSubReaders);
diff --git a/src/java/org/apache/lucene/index/Payload.java b/src/java/org/apache/lucene/index/Payload.java
index 5c4a4168e23..50a99129d69 100644
--- a/src/java/org/apache/lucene/index/Payload.java
+++ b/src/java/org/apache/lucene/index/Payload.java
@@ -164,7 +164,7 @@ public class Payload implements Serializable, Cloneable {
// Only copy the part of data that belongs to this Payload
if (offset == 0 && length == data.length) {
// It is the whole thing, so just clone it.
- clone.data = (byte[]) data.clone();
+ clone.data = data.clone();
}
else {
// Just get the part
diff --git a/src/java/org/apache/lucene/index/ReusableStringReader.java b/src/java/org/apache/lucene/index/ReusableStringReader.java
index 902cd0bafaa..ebd5108d2cb 100644
--- a/src/java/org/apache/lucene/index/ReusableStringReader.java
+++ b/src/java/org/apache/lucene/index/ReusableStringReader.java
@@ -53,6 +53,6 @@ final class ReusableStringReader extends Reader {
}
}
@Override
- public void close() {};
+ public void close() {}
}
diff --git a/src/java/org/apache/lucene/index/SegmentInfo.java b/src/java/org/apache/lucene/index/SegmentInfo.java
index 25d47ec58f2..acf6e0b2c93 100644
--- a/src/java/org/apache/lucene/index/SegmentInfo.java
+++ b/src/java/org/apache/lucene/index/SegmentInfo.java
@@ -318,7 +318,7 @@ public final class SegmentInfo {
si.hasSingleNormFile = hasSingleNormFile;
si.diagnostics = new HashMap<String,String>(diagnostics);
if (normGen != null) {
- si.normGen = (long[]) normGen.clone();
+ si.normGen = normGen.clone();
}
si.docStoreOffset = docStoreOffset;
si.docStoreSegment = docStoreSegment;
diff --git a/src/java/org/apache/lucene/index/SegmentInfos.java b/src/java/org/apache/lucene/index/SegmentInfos.java
index b9b87f59231..826e24e287c 100644
--- a/src/java/org/apache/lucene/index/SegmentInfos.java
+++ b/src/java/org/apache/lucene/index/SegmentInfos.java
@@ -705,9 +705,7 @@ public final class SegmentInfos extends Vector<SegmentInfo> {
message("fallback to prior segment file '" + prevSegmentFileName + "'");
try {
Object v = doBody(prevSegmentFileName);
- if (exc != null) {
- message("success on fallback " + prevSegmentFileName);
- }
+ message("success on fallback " + prevSegmentFileName);
return v;
} catch (IOException err2) {
message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
diff --git a/src/java/org/apache/lucene/index/StoredFieldsWriter.java b/src/java/org/apache/lucene/index/StoredFieldsWriter.java
index 707a68f826a..5b15d437cca 100644
--- a/src/java/org/apache/lucene/index/StoredFieldsWriter.java
+++ b/src/java/org/apache/lucene/index/StoredFieldsWriter.java
@@ -63,7 +63,6 @@ final class StoredFieldsWriter {
if (fieldsWriter == null) {
final String docStoreSegment = docWriter.getDocStoreSegment();
if (docStoreSegment != null) {
- assert docStoreSegment != null;
fieldsWriter = new FieldsWriter(docWriter.directory,
docStoreSegment,
fieldInfos);
diff --git a/src/java/org/apache/lucene/index/TermVectorsReader.java b/src/java/org/apache/lucene/index/TermVectorsReader.java
index a15efeae070..8fc6cfa436d 100644
--- a/src/java/org/apache/lucene/index/TermVectorsReader.java
+++ b/src/java/org/apache/lucene/index/TermVectorsReader.java
@@ -208,7 +208,7 @@ class TermVectorsReader implements Cloneable {
// make all effort to close up. Keep the first exception
// and throw it as a new one.
IOException keep = null;
- if (tvx != null) try { tvx.close(); } catch (IOException e) { if (keep == null) keep = e; }
+ if (tvx != null) try { tvx.close(); } catch (IOException e) { keep = e; }
if (tvd != null) try { tvd.close(); } catch (IOException e) { if (keep == null) keep = e; }
if (tvf != null) try { tvf.close(); } catch (IOException e) { if (keep == null) keep = e; }
if (keep != null) throw (IOException) keep.fillInStackTrace();
diff --git a/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java b/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
index f0ac4b9d50b..24c122a4249 100644
--- a/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
+++ b/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
@@ -151,8 +151,6 @@ final class TermVectorsTermsWriter extends TermsHashConsumer {
if (docStoreSegment == null)
return;
- assert docStoreSegment != null;
-
// If we hit an exception while init'ing the term
// vector output files, we must abort this segment
// because those files will be in an unknown
@@ -198,8 +196,8 @@ final class TermVectorsTermsWriter extends TermsHashConsumer {
tvd.writeVLong(pos-lastPos);
lastPos = pos;
}
- perDoc.tvf.writeTo(tvf);
- perDoc.tvf.reset();
+ perDoc.perDocTvf.writeTo(tvf);
+ perDoc.perDocTvf.reset();
perDoc.numVectorFields = 0;
}
@@ -252,14 +250,14 @@ final class TermVectorsTermsWriter extends TermsHashConsumer {
// TODO: use something more memory efficient; for small
// docs the 1024 buffer size of RAMOutputStream wastes alot
- RAMOutputStream tvf = new RAMOutputStream();
+ RAMOutputStream perDocTvf = new RAMOutputStream();
int numVectorFields;
int[] fieldNumbers = new int[1];
long[] fieldPointers = new long[1];
void reset() {
- tvf.reset();
+ perDocTvf.reset();
numVectorFields = 0;
}
@@ -277,13 +275,13 @@ final class TermVectorsTermsWriter extends TermsHashConsumer {
fieldPointers = ArrayUtil.grow(fieldPointers);
}
fieldNumbers[numVectorFields] = fieldNumber;
- fieldPointers[numVectorFields] = tvf.getFilePointer();
+ fieldPointers[numVectorFields] = perDocTvf.getFilePointer();
numVectorFields++;
}
@Override
public long sizeInBytes() {
- return tvf.sizeInBytes();
+ return perDocTvf.sizeInBytes();
}
@Override
diff --git a/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java b/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
index 8c7f9d2168e..c1ca5473ab8 100644
--- a/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
+++ b/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
@@ -74,8 +74,8 @@ final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
perThread.doc = termsWriter.getPerDoc();
perThread.doc.docID = docState.docID;
assert perThread.doc.numVectorFields == 0;
- assert 0 == perThread.doc.tvf.length();
- assert 0 == perThread.doc.tvf.getFilePointer();
+ assert 0 == perThread.doc.perDocTvf.length();
+ assert 0 == perThread.doc.perDocTvf.getFilePointer();
} else {
assert perThread.doc.docID == docState.docID;
@@ -114,7 +114,7 @@ final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
if (numPostings > maxNumPostings)
maxNumPostings = numPostings;
- final IndexOutput tvf = perThread.doc.tvf;
+ final IndexOutput tvf = perThread.doc.perDocTvf;
// This is called once, after inverting all occurrences
// of a given field in the doc. At this point we flush
@@ -216,7 +216,7 @@ final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
p.freq = 1;
if (doVectorOffsets) {
- int startOffset = fieldState.offset + offsetAttribute.startOffset();;
+ int startOffset = fieldState.offset + offsetAttribute.startOffset();
int endOffset = fieldState.offset + offsetAttribute.endOffset();
termsHashPerField.writeVInt(1, startOffset);
@@ -239,7 +239,7 @@ final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
p.freq++;
if (doVectorOffsets) {
- int startOffset = fieldState.offset + offsetAttribute.startOffset();;
+ int startOffset = fieldState.offset + offsetAttribute.startOffset();
int endOffset = fieldState.offset + offsetAttribute.endOffset();
termsHashPerField.writeVInt(1, startOffset - p.lastOffset);
diff --git a/src/java/org/apache/lucene/index/TermVectorsWriter.java b/src/java/org/apache/lucene/index/TermVectorsWriter.java
index 2d10cd9b21e..2870dd0ec12 100644
--- a/src/java/org/apache/lucene/index/TermVectorsWriter.java
+++ b/src/java/org/apache/lucene/index/TermVectorsWriter.java
@@ -199,7 +199,7 @@ final class TermVectorsWriter {
try {
tvx.close();
} catch (IOException e) {
- if (keep == null) keep = e;
+ keep = e;
}
if (tvd != null)
try {
diff --git a/src/java/org/apache/lucene/index/TermsHash.java b/src/java/org/apache/lucene/index/TermsHash.java
index 74fc2d0c320..2a26d8ce6e1 100644
--- a/src/java/org/apache/lucene/index/TermsHash.java
+++ b/src/java/org/apache/lucene/index/TermsHash.java
@@ -61,7 +61,7 @@ final class TermsHash extends InvertedDocConsumer {
// targets 25-50% fill factor; approximate this
// as 3X # pointers
bytesPerPosting = consumer.bytesPerPosting() + 4*DocumentsWriter.POINTER_NUM_BYTE;
- postingsFreeChunk = (int) (DocumentsWriter.BYTE_BLOCK_SIZE / bytesPerPosting);
+ postingsFreeChunk = (DocumentsWriter.BYTE_BLOCK_SIZE / bytesPerPosting);
}
@Override
diff --git a/src/java/org/apache/lucene/index/TermsHashPerField.java b/src/java/org/apache/lucene/index/TermsHashPerField.java
index da6b2ab816b..dead601b050 100644
--- a/src/java/org/apache/lucene/index/TermsHashPerField.java
+++ b/src/java/org/apache/lucene/index/TermsHashPerField.java
@@ -351,7 +351,7 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
// term text into textStart address
// Get the text of this term.
- final char[] tokenText = termAtt.termBuffer();;
+ final char[] tokenText = termAtt.termBuffer();
final int tokenTextLen = termAtt.termLength();
// Compute hashcode & replace any invalid UTF16 sequences
diff --git a/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java b/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
index a870e92b022..e9d12dc530c 100644
--- a/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
+++ b/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
@@ -84,7 +84,7 @@ class DisjunctionMaxScorer extends Scorer {
return doc;
}
- /** Determine the current document score. Initially invalid, until {@link #next()} is called the first time.
+ /** Determine the current document score. Initially invalid, until {@link #nextDoc()} is called the first time.
* @return the score of the current generated document
*/
@Override
diff --git a/src/java/org/apache/lucene/search/DisjunctionSumScorer.java b/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
index f2c0656d269..bb327d9218d 100644
--- a/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
+++ b/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
@@ -188,7 +188,7 @@ class DisjunctionSumScorer extends Scorer {
}
/** Returns the score of the current document matching the query.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
*/
@Override
public float score() throws IOException { return currentScore; }
@@ -199,7 +199,7 @@ class DisjunctionSumScorer extends Scorer {
}
/** Returns the number of subscorers matching the current document.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
*/
public int nrMatchers() {
return nrMatchers;
diff --git a/src/java/org/apache/lucene/search/DocIdSetIterator.java b/src/java/org/apache/lucene/search/DocIdSetIterator.java
index 7be02d535b7..f10d04c0d48 100644
--- a/src/java/org/apache/lucene/search/DocIdSetIterator.java
+++ b/src/java/org/apache/lucene/search/DocIdSetIterator.java
@@ -28,8 +28,6 @@ import java.io.IOException;
*/
public abstract class DocIdSetIterator {
- private int doc = -1;
-
/**
* When returned by {@link #nextDoc()}, {@link #advance(int)} and
* {@link #docID()} it means there are no more docs in the iterator.
diff --git a/src/java/org/apache/lucene/search/FieldCache.java b/src/java/org/apache/lucene/search/FieldCache.java
index acaf60bd063..b928176f6e6 100644
--- a/src/java/org/apache/lucene/search/FieldCache.java
+++ b/src/java/org/apache/lucene/search/FieldCache.java
@@ -501,7 +501,7 @@ public interface FieldCache {
public static abstract class CacheEntry {
public abstract Object getReaderKey();
public abstract String getFieldName();
- public abstract Class getCacheType();
+ public abstract Class<?> getCacheType();
public abstract Object getCustom();
public abstract Object getValue();
private String size = null;
diff --git a/src/java/org/apache/lucene/search/FieldCacheImpl.java b/src/java/org/apache/lucene/search/FieldCacheImpl.java
index c304c9b63c3..929c9195235 100644
--- a/src/java/org/apache/lucene/search/FieldCacheImpl.java
+++ b/src/java/org/apache/lucene/search/FieldCacheImpl.java
@@ -296,7 +296,7 @@ class FieldCacheImpl implements FieldCache {
}
return retArray;
}
- };
+ }
// inherit javadocs
public short[] getShorts (IndexReader reader, String field) throws IOException {
@@ -343,7 +343,7 @@ class FieldCacheImpl implements FieldCache {
}
return retArray;
}
- };
+ }
// inherit javadocs
public int[] getInts (IndexReader reader, String field) throws IOException {
@@ -398,7 +398,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new int[reader.maxDoc()];
return retArray;
}
- };
+ }
// inherit javadocs
@@ -456,7 +456,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new float[reader.maxDoc()];
return retArray;
}
- };
+ }
public long[] getLongs(IndexReader reader, String field) throws IOException {
@@ -510,7 +510,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new long[reader.maxDoc()];
return retArray;
}
- };
+ }
// inherit javadocs
public double[] getDoubles(IndexReader reader, String field)
@@ -566,7 +566,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new double[reader.maxDoc()];
return retArray;
}
- };
+ }
// inherit javadocs
public String[] getStrings(IndexReader reader, String field)
@@ -602,7 +602,7 @@ class FieldCacheImpl implements FieldCache {
}
return retArray;
}
- };
+ }
// inherit javadocs
public StringIndex getStringIndex(IndexReader reader, String field)
@@ -666,7 +666,7 @@ class FieldCacheImpl implements FieldCache {
StringIndex value = new StringIndex (retArray, mterms);
return value;
}
- };
+ }
private volatile PrintStream infoStream;
diff --git a/src/java/org/apache/lucene/search/FieldComparator.java b/src/java/org/apache/lucene/search/FieldComparator.java
index 0f7544cb782..b003eddd3b3 100644
--- a/src/java/org/apache/lucene/search/FieldComparator.java
+++ b/src/java/org/apache/lucene/search/FieldComparator.java
@@ -160,7 +160,7 @@ public abstract class FieldComparator {
* @param slot the value
* @return value in this slot upgraded to Comparable
*/
- public abstract Comparable value(int slot);
+ public abstract Comparable<?> value(int slot);
/** Parses field's values as byte (using {@link
* FieldCache#getBytes} and sorts by ascending value */
@@ -203,7 +203,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Byte.valueOf(values[slot]);
}
}
@@ -249,7 +249,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(docIDs[slot]);
}
}
@@ -310,7 +310,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Double.valueOf(values[slot]);
}
}
@@ -375,7 +375,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Float.valueOf(values[slot]);
}
}
@@ -444,7 +444,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(values[slot]);
}
}
@@ -509,7 +509,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Long.valueOf(values[slot]);
}
}
@@ -564,7 +564,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Float.valueOf(scores[slot]);
}
}
@@ -610,7 +610,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Short.valueOf(values[slot]);
}
}
@@ -676,7 +676,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return values[slot];
}
}
@@ -825,7 +825,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return values[slot];
}
@@ -904,7 +904,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return values[slot];
}
}
diff --git a/src/java/org/apache/lucene/search/FieldValueHitQueue.java b/src/java/org/apache/lucene/search/FieldValueHitQueue.java
index e3a0293b2fe..4d82696ca6e 100644
--- a/src/java/org/apache/lucene/search/FieldValueHitQueue.java
+++ b/src/java/org/apache/lucene/search/FieldValueHitQueue.java
@@ -203,7 +203,7 @@ public abstract class FieldValueHitQueue extends PriorityQueue<FieldValueHitQueue.Entry> {
- final Comparable[] fields = new Comparable[n];
+ final Comparable<?>[] fields = new Comparable[n];
for (int i = 0; i < n; ++i) {
fields[i] = comparators[i].value(entry.slot);
}
diff --git a/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java b/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
index 8fb9008836b..afd265724d0 100644
--- a/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
+++ b/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
@@ -45,7 +45,7 @@ public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
* Validation method to determine whether a docid should be in the result set.
* @param doc docid to be tested
* @return true if input docid should be in the result set, false otherwise.
- * @see #FilteredDocIdSetIterator(DocIdSetIterator).
+ * @see #FilteredDocIdSetIterator(DocIdSetIterator)
*/
abstract protected boolean match(int doc) throws IOException;
diff --git a/src/java/org/apache/lucene/search/IndexSearcher.java b/src/java/org/apache/lucene/search/IndexSearcher.java
index 59df2d47d47..c3c0588647a 100644
--- a/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -36,8 +36,8 @@ import org.apache.lucene.util.ReaderUtil;
* or {@link #search(Query,Filter,int)} methods. For performance reasons it is
* recommended to open only one IndexSearcher and use it for all of your searches.
*
- * <b>NOTE</b>: {@link
- * <code>IndexSearcher</code>} instances are completely
+ * <p><b>NOTE</b>: {@link
+ * IndexSearcher} instances are completely
* thread safe, meaning multiple threads can call any of its
* methods, concurrently. If your application requires
* external synchronization, you should not
@@ -55,9 +55,9 @@ public class IndexSearcher extends Searcher {
/** Creates a searcher searching the index in the named
* directory, with readOnly=true
+ * @param path directory where IndexReader will be opened
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
- * @param path directory where IndexReader will be opened
*/
public IndexSearcher(Directory path) throws CorruptIndexException, IOException {
this(IndexReader.open(path, true), true);
@@ -68,11 +68,11 @@ public class IndexSearcher extends Searcher {
* gives much better concurrent performance, unless you
* intend to do write operations (delete documents or
* change norms) with the underlying IndexReader.
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
* @param path directory where IndexReader will be opened
* @param readOnly if true, the underlying IndexReader
* will be opened readOnly
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path, boolean readOnly) throws CorruptIndexException, IOException {
this(IndexReader.open(path, readOnly), true);
diff --git a/src/java/org/apache/lucene/search/ReqExclScorer.java b/src/java/org/apache/lucene/search/ReqExclScorer.java
index 0ebaef16254..a32922b1947 100644
--- a/src/java/org/apache/lucene/search/ReqExclScorer.java
+++ b/src/java/org/apache/lucene/search/ReqExclScorer.java
@@ -95,7 +95,7 @@ class ReqExclScorer extends Scorer {
}
/** Returns the score of the current document matching the query.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
* @return The score of the required scorer.
*/
@Override
diff --git a/src/java/org/apache/lucene/search/ReqOptSumScorer.java b/src/java/org/apache/lucene/search/ReqOptSumScorer.java
index db4fc379217..c8e1b81ff54 100644
--- a/src/java/org/apache/lucene/search/ReqOptSumScorer.java
+++ b/src/java/org/apache/lucene/search/ReqOptSumScorer.java
@@ -59,7 +59,7 @@ class ReqOptSumScorer extends Scorer {
}
/** Returns the score of the current document matching the query.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
* @return The score of the required scorer, eventually increased by the score
* of the optional scorer when it also matches the current document.
*/
diff --git a/src/java/org/apache/lucene/search/Similarity.java b/src/java/org/apache/lucene/search/Similarity.java
index a48cd8e80b9..c2705d84e59 100644
--- a/src/java/org/apache/lucene/search/Similarity.java
+++ b/src/java/org/apache/lucene/search/Similarity.java
@@ -402,7 +402,7 @@ import java.util.Collection;
*
* The sum of squared weights (of the query terms) is
* computed by the query {@link org.apache.lucene.search.Weight} object.
- * For example, a {@link org.apache.lucene.search.BooleanQuery boolean query}
+ * For example, a {@link org.apache.lucene.search.BooleanQuery}
* computes this value as:
*
*
@@ -609,7 +609,7 @@ public abstract class Similarity implements Serializable {
* @return the calculated float norm
*/
public float computeNorm(String field, FieldInvertState state) {
- return (float) (state.getBoost() * lengthNorm(field, state.getLength()));
+ return (state.getBoost() * lengthNorm(field, state.getLength()));
}
/** Computes the normalization value for a field given the total number of
diff --git a/src/java/org/apache/lucene/search/function/ByteFieldSource.java b/src/java/org/apache/lucene/search/function/ByteFieldSource.java
index cb667b4de50..2d9775f3124 100644
--- a/src/java/org/apache/lucene/search/function/ByteFieldSource.java
+++ b/src/java/org/apache/lucene/search/function/ByteFieldSource.java
@@ -39,10 +39,7 @@ import java.io.IOException;
* composite (multi-segment) reader, this can easily cause
* double RAM usage for the values in the FieldCache. It's
* best to switch your application to pass only atomic
- * (single segment) readers to this API. Alternatively, for
- * a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
- * but will not consume double the FieldCache RAM.
+ * (single segment) readers to this API.
*/
public class ByteFieldSource extends FieldCacheSource {
private FieldCache.ByteParser parser;
@@ -76,7 +73,7 @@ public class ByteFieldSource extends FieldCacheSource {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float) arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
diff --git a/src/java/org/apache/lucene/search/function/CustomScoreProvider.java b/src/java/org/apache/lucene/search/function/CustomScoreProvider.java
index 7557dbb6cf3..2af0187f104 100644
--- a/src/java/org/apache/lucene/search/function/CustomScoreProvider.java
+++ b/src/java/org/apache/lucene/search/function/CustomScoreProvider.java
@@ -20,7 +20,6 @@ package org.apache.lucene.search.function;
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldCache; // for javadocs
diff --git a/src/java/org/apache/lucene/search/function/CustomScoreQuery.java b/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
index bddc853139b..0cad1e05818 100755
--- a/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
+++ b/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
@@ -294,7 +294,6 @@ public class CustomScoreQuery extends Query {
private final float qWeight;
private Scorer subQueryScorer;
private Scorer[] valSrcScorers;
- private IndexReader reader;
private final CustomScoreProvider provider;
private float vScores[]; // reused in score() to avoid allocating this array for each doc
@@ -305,7 +304,6 @@ public class CustomScoreQuery extends Query {
this.qWeight = w.getValue();
this.subQueryScorer = subQueryScorer;
this.valSrcScorers = valSrcScorers;
- this.reader = reader;
this.vScores = new float[valSrcScorers.length];
this.provider = CustomScoreQuery.this.getCustomScoreProvider(reader);
}
diff --git a/src/java/org/apache/lucene/search/function/DocValues.java b/src/java/org/apache/lucene/search/function/DocValues.java
index 4f28d72550b..5b7db51aa94 100755
--- a/src/java/org/apache/lucene/search/function/DocValues.java
+++ b/src/java/org/apache/lucene/search/function/DocValues.java
@@ -70,7 +70,7 @@ public abstract class DocValues {
* @param doc document whose double value is requested.
*/
public double doubleVal(int doc) {
- return (double) floatVal(doc);
+ return floatVal(doc);
}
/**
diff --git a/src/java/org/apache/lucene/search/function/FloatFieldSource.java b/src/java/org/apache/lucene/search/function/FloatFieldSource.java
index 72d2ae397da..c702ca37ae7 100644
--- a/src/java/org/apache/lucene/search/function/FloatFieldSource.java
+++ b/src/java/org/apache/lucene/search/function/FloatFieldSource.java
@@ -39,10 +39,7 @@ import java.io.IOException;
* composite (multi-segment) reader, this can easily cause
* double RAM usage for the values in the FieldCache. It's
* best to switch your application to pass only atomic
- * (single segment) readers to this API. Alternatively, for
- * a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
- * but will not consume double the FieldCache RAM.
+ * (single segment) readers to this API.
*/
public class FloatFieldSource extends FieldCacheSource {
private FieldCache.FloatParser parser;
diff --git a/src/java/org/apache/lucene/search/function/IntFieldSource.java b/src/java/org/apache/lucene/search/function/IntFieldSource.java
index a38533e0806..685eb15f45a 100755
--- a/src/java/org/apache/lucene/search/function/IntFieldSource.java
+++ b/src/java/org/apache/lucene/search/function/IntFieldSource.java
@@ -39,10 +39,7 @@ import java.io.IOException;
* composite (multi-segment) reader, this can easily cause
* double RAM usage for the values in the FieldCache. It's
* best to switch your application to pass only atomic
- * (single segment) readers to this API. Alternatively, for
- * a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
- * but will not consume double the FieldCache RAM.
+ * (single segment) readers to this API.
*/
public class IntFieldSource extends FieldCacheSource {
private FieldCache.IntParser parser;
@@ -76,7 +73,7 @@ public class IntFieldSource extends FieldCacheSource {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float) arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
diff --git a/src/java/org/apache/lucene/search/function/OrdFieldSource.java b/src/java/org/apache/lucene/search/function/OrdFieldSource.java
index fa24facbe9f..05032a012fe 100644
--- a/src/java/org/apache/lucene/search/function/OrdFieldSource.java
+++ b/src/java/org/apache/lucene/search/function/OrdFieldSource.java
@@ -74,7 +74,7 @@ public class OrdFieldSource extends ValueSource {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float)arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#strVal(int) */
@Override
diff --git a/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java b/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java
index 0cf446783c9..d2938093f60 100644
--- a/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java
+++ b/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java
@@ -79,7 +79,7 @@ public class ReverseOrdFieldSource extends ValueSource {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float)(end - arr[doc]);
+ return (end - arr[doc]);
}
/* (non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
diff --git a/src/java/org/apache/lucene/search/function/ShortFieldSource.java b/src/java/org/apache/lucene/search/function/ShortFieldSource.java
index 157bdd454aa..444a9156a01 100644
--- a/src/java/org/apache/lucene/search/function/ShortFieldSource.java
+++ b/src/java/org/apache/lucene/search/function/ShortFieldSource.java
@@ -39,10 +39,7 @@ import java.io.IOException;
* composite (multi-segment) reader, this can easily cause
* double RAM usage for the values in the FieldCache. It's
* best to switch your application to pass only atomic
- * (single segment) readers to this API. Alternatively, for
- * a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
- * but will not consume double the FieldCache RAM.
+ * (single segment) readers to this API.
*/
public class ShortFieldSource extends FieldCacheSource {
private FieldCache.ShortParser parser;
@@ -76,7 +73,7 @@ public class ShortFieldSource extends FieldCacheSource {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float) arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
diff --git a/src/java/org/apache/lucene/search/function/ValueSourceQuery.java b/src/java/org/apache/lucene/search/function/ValueSourceQuery.java
index 82dad2d5c62..71ae79b5575 100644
--- a/src/java/org/apache/lucene/search/function/ValueSourceQuery.java
+++ b/src/java/org/apache/lucene/search/function/ValueSourceQuery.java
@@ -125,7 +125,6 @@ public class ValueSourceQuery extends Query {
* be used. (assuming field is indexed for this doc, with a single token.)
*/
private class ValueSourceScorer extends Scorer {
- private final ValueSourceWeight weight;
private final float qWeight;
private final DocValues vals;
private final TermDocs termDocs;
@@ -134,8 +133,7 @@ public class ValueSourceQuery extends Query {
// constructor
private ValueSourceScorer(Similarity similarity, IndexReader reader, ValueSourceWeight w) throws IOException {
super(similarity);
- this.weight = w;
- this.qWeight = w.getValue();
+ qWeight = w.getValue();
// this is when/where the values are first created.
vals = valSrc.getValues(reader);
termDocs = reader.termDocs(null);
diff --git a/src/java/org/apache/lucene/store/IndexOutput.java b/src/java/org/apache/lucene/store/IndexOutput.java
index bc34b5026d1..6497b1f6b50 100644
--- a/src/java/org/apache/lucene/store/IndexOutput.java
+++ b/src/java/org/apache/lucene/store/IndexOutput.java
@@ -119,7 +119,7 @@ public abstract class IndexOutput implements Closeable {
throws IOException {
final int end = start + length;
for (int i = start; i < end; i++) {
- final int code = (int)s.charAt(i);
+ final int code = s.charAt(i);
if (code >= 0x01 && code <= 0x7F)
writeByte((byte)code);
else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
@@ -145,7 +145,7 @@ public abstract class IndexOutput implements Closeable {
throws IOException {
final int end = start + length;
for (int i = start; i < end; i++) {
- final int code = (int)s[i];
+ final int code = s[i];
if (code >= 0x01 && code <= 0x7F)
writeByte((byte)code);
else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
@@ -210,7 +210,7 @@ public abstract class IndexOutput implements Closeable {
* undefined. Otherwise the file is truncated.
* @param length file length
*/
- public void setLength(long length) throws IOException {};
+ public void setLength(long length) throws IOException {}
public void writeStringStringMap(Map<String,String> map) throws IOException {
if (map == null) {
diff --git a/src/java/org/apache/lucene/store/LockObtainFailedException.java b/src/java/org/apache/lucene/store/LockObtainFailedException.java
index e8902dadfae..0c61caf2f33 100644
--- a/src/java/org/apache/lucene/store/LockObtainFailedException.java
+++ b/src/java/org/apache/lucene/store/LockObtainFailedException.java
@@ -24,7 +24,7 @@ import java.io.IOException;
* could not be acquired. This
* happens when a writer tries to open an index
* that another writer already has open.
- * @see Lock#obtain(long).
+ * @see Lock#obtain(long)
*/
public class LockObtainFailedException extends IOException {
public LockObtainFailedException(String message) {
diff --git a/src/java/org/apache/lucene/store/LockReleaseFailedException.java b/src/java/org/apache/lucene/store/LockReleaseFailedException.java
index 4f15b4800e1..8e7d6269c30 100644
--- a/src/java/org/apache/lucene/store/LockReleaseFailedException.java
+++ b/src/java/org/apache/lucene/store/LockReleaseFailedException.java
@@ -22,7 +22,7 @@ import java.io.IOException;
/**
* This exception is thrown when the write.lock
* could not be released.
- * @see Lock#release().
+ * @see Lock#release()
*/
public class LockReleaseFailedException extends IOException {
public LockReleaseFailedException(String message) {
diff --git a/src/java/org/apache/lucene/store/MMapDirectory.java b/src/java/org/apache/lucene/store/MMapDirectory.java
index aaa805cbbd5..c12e8232ffd 100644
--- a/src/java/org/apache/lucene/store/MMapDirectory.java
+++ b/src/java/org/apache/lucene/store/MMapDirectory.java
@@ -375,7 +375,7 @@ public class MMapDirectory extends FSDirectory {
RuntimeException newException = new RuntimeException(ioe);
newException.initCause(ioe);
throw newException;
- };
+ }
return clone;
}
@@ -404,7 +404,7 @@ public class MMapDirectory extends FSDirectory {
File f = new File(getDirectory(), name);
RandomAccessFile raf = new RandomAccessFile(f, "r");
try {
- return (raf.length() <= (long) maxBBuf)
+ return (raf.length() <= maxBBuf)
? (IndexInput) new MMapIndexInput(raf)
: (IndexInput) new MultiMMapIndexInput(raf, maxBBuf);
} finally {
diff --git a/src/java/org/apache/lucene/store/NoLockFactory.java b/src/java/org/apache/lucene/store/NoLockFactory.java
index f4d78da7acb..242e78205d7 100755
--- a/src/java/org/apache/lucene/store/NoLockFactory.java
+++ b/src/java/org/apache/lucene/store/NoLockFactory.java
@@ -52,8 +52,8 @@ public class NoLockFactory extends LockFactory {
}
@Override
- public void clearLock(String lockName) {};
-};
+ public void clearLock(String lockName) {}
+}
class NoLock extends Lock {
@Override
diff --git a/src/java/org/apache/lucene/store/SimpleFSLockFactory.java b/src/java/org/apache/lucene/store/SimpleFSLockFactory.java
index fd6986374a8..dc8d73fe390 100755
--- a/src/java/org/apache/lucene/store/SimpleFSLockFactory.java
+++ b/src/java/org/apache/lucene/store/SimpleFSLockFactory.java
@@ -100,7 +100,7 @@ public class SimpleFSLockFactory extends FSLockFactory {
}
}
}
-};
+}
class SimpleFSLock extends Lock {
diff --git a/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java b/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
index 20c66e94a34..9264a4cd53f 100755
--- a/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
+++ b/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
@@ -51,7 +51,7 @@ public class SingleInstanceLockFactory extends LockFactory {
}
}
}
-};
+}
class SingleInstanceLock extends Lock {
diff --git a/src/java/org/apache/lucene/util/AverageGuessMemoryModel.java b/src/java/org/apache/lucene/util/AverageGuessMemoryModel.java
index 4694cda4dec..29858c81f28 100644
--- a/src/java/org/apache/lucene/util/AverageGuessMemoryModel.java
+++ b/src/java/org/apache/lucene/util/AverageGuessMemoryModel.java
@@ -26,7 +26,7 @@ import java.util.Map;
*/
public class AverageGuessMemoryModel extends MemoryModel {
// best guess primitive sizes
- private final Map sizes = new IdentityHashMap() {
+ private final Map<Class<?>,Integer> sizes = new IdentityHashMap<Class<?>,Integer>() {
{
put(boolean.class, Integer.valueOf(1));
put(byte.class, Integer.valueOf(1));
@@ -63,7 +63,7 @@ public class AverageGuessMemoryModel extends MemoryModel {
* @see org.apache.lucene.util.MemoryModel#getPrimitiveSize(java.lang.Class)
*/
@Override
- public int getPrimitiveSize(Class clazz) {
+ public int getPrimitiveSize(Class<?> clazz) {
return sizes.get(clazz).intValue();
}
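
The pattern being generified above — an IdentityHashMap keyed by Class<?>, filled via an instance initializer — works because Class objects are canonical, so identity equality is safe. A standalone sketch of the same pattern (names here are illustrative):

import java.util.IdentityHashMap;
import java.util.Map;

public class PrimitiveSizesSketch {
  @SuppressWarnings("serial")
  private static final Map<Class<?>, Integer> SIZES =
      new IdentityHashMap<Class<?>, Integer>() {{
        put(boolean.class, 1);
        put(byte.class, 1);
        put(char.class, 2);
        put(short.class, 2);
        put(int.class, 4);
        put(float.class, 4);
        put(long.class, 8);
        put(double.class, 8);
      }};

  public static int sizeOf(Class<?> primitive) {
    return SIZES.get(primitive);
  }

  public static void main(String[] args) {
    System.out.println(sizeOf(int.class)); // prints 4
  }
}
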
diff --git a/src/java/org/apache/lucene/util/IndexableBinaryStringTools.java b/src/java/org/apache/lucene/util/IndexableBinaryStringTools.java
index 8a32ece97c4..d42c3084c7f 100644
--- a/src/java/org/apache/lucene/util/IndexableBinaryStringTools.java
+++ b/src/java/org/apache/lucene/util/IndexableBinaryStringTools.java
@@ -104,7 +104,7 @@ public class IndexableBinaryStringTools {
public static int getEncodedLength(byte[] inputArray, int inputOffset,
int inputLength) {
// Use long for intermediaries to protect against overflow
- return (int)(((long)inputLength * 8L + 14L) / 15L) + 1;
+ return (int)((8L * inputLength + 14L) / 15L) + 1;
}
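
The simplified formula is ceiling division: 8 input bits per byte are repacked 15 bits per output char, plus one trailing char of bookkeeping, and doing the multiply in long first is what guards against int overflow. A quick check under those assumptions:

public class EncodedLengthSketch {
  static int getEncodedLength(int inputLength) {
    return (int) ((8L * inputLength + 14L) / 15L) + 1;
  }

  public static void main(String[] args) {
    System.out.println(getEncodedLength(15));                // 120 bits -> 8 chars, +1 = 9
    System.out.println(getEncodedLength(Integer.MAX_VALUE)); // stays positive, no overflow
  }
}
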
diff --git a/src/java/org/apache/lucene/util/MemoryModel.java b/src/java/org/apache/lucene/util/MemoryModel.java
index ed422d2f81c..9ea0fdc5709 100644
--- a/src/java/org/apache/lucene/util/MemoryModel.java
+++ b/src/java/org/apache/lucene/util/MemoryModel.java
@@ -38,7 +38,7 @@ public abstract class MemoryModel {
* short, double, int
* @return the size in bytes of given primitive Class
*/
- public abstract int getPrimitiveSize(Class clazz);
+ public abstract int getPrimitiveSize(Class<?> clazz);
/**
* @return size of reference
diff --git a/src/java/org/apache/lucene/util/NumericUtils.java b/src/java/org/apache/lucene/util/NumericUtils.java
index 39bb886f0aa..24272937480 100644
--- a/src/java/org/apache/lucene/util/NumericUtils.java
+++ b/src/java/org/apache/lucene/util/NumericUtils.java
@@ -210,10 +210,10 @@ public final class NumericUtils {
if (ch>0x7f) {
throw new NumberFormatException(
"Invalid prefixCoded numerical value representation (char "+
- Integer.toHexString((int)ch)+" at position "+i+" is invalid)"
+ Integer.toHexString(ch)+" at position "+i+" is invalid)"
);
}
- sortableBits |= (long)ch;
+ sortableBits |= ch;
}
return (sortableBits << shift) ^ 0x8000000000000000L;
}
@@ -237,10 +237,10 @@ public final class NumericUtils {
if (ch>0x7f) {
throw new NumberFormatException(
"Invalid prefixCoded numerical value representation (char "+
- Integer.toHexString((int)ch)+" at position "+i+" is invalid)"
+ Integer.toHexString(ch)+" at position "+i+" is invalid)"
);
}
- sortableBits |= (int)ch;
+ sortableBits |= ch;
}
return (sortableBits << shift) ^ 0x80000000;
}
@@ -346,7 +346,7 @@ public final class NumericUtils {
public static void splitIntRange(final IntRangeBuilder builder,
final int precisionStep, final int minBound, final int maxBound
) {
- splitRange(builder, 32, precisionStep, (long)minBound, (long)maxBound);
+ splitRange(builder, 32, precisionStep, minBound, maxBound);
}
/** This helper does the splitting for both 32 and 64 bit. */
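
The decode loops above finish with (sortableBits << shift) ^ 0x80... — flipping the sign bit converts between two's-complement order and unsigned/lexicographic order, which is what lets prefix-coded terms sort correctly. XOR is its own inverse, so encoding and decoding share the step. A small demonstration:

public class SortableBitsSketch {
  static long flipSignBit(long v) { return v ^ 0x8000000000000000L; }

  public static void main(String[] args) {
    System.out.println(Long.toHexString(flipSignBit(-1L))); // 7fffffffffffffff
    System.out.println(Long.toHexString(flipSignBit(0L)));  // 8000000000000000
    System.out.println(Long.toHexString(flipSignBit(1L)));  // 8000000000000001
    // compared as unsigned values, the flipped forms keep the signed order -1 < 0 < 1
  }
}
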
diff --git a/src/java/org/apache/lucene/util/OpenBitSet.java b/src/java/org/apache/lucene/util/OpenBitSet.java
index ebead411533..6ee6b42b8af 100644
--- a/src/java/org/apache/lucene/util/OpenBitSet.java
+++ b/src/java/org/apache/lucene/util/OpenBitSet.java
@@ -642,7 +642,7 @@ public class OpenBitSet extends DocIdSet implements Cloneable, Serializable {
public Object clone() {
try {
OpenBitSet obs = (OpenBitSet)super.clone();
- obs.bits = (long[]) obs.bits.clone(); // hopefully an array clone is as fast(er) than arraycopy
+ obs.bits = obs.bits.clone(); // hopefully an array clone is as fast as (or faster than) arraycopy
return obs;
} catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
diff --git a/src/java/org/apache/lucene/util/Parameter.java b/src/java/org/apache/lucene/util/Parameter.java
index 7629971bb3b..722afcb3d81 100644
--- a/src/java/org/apache/lucene/util/Parameter.java
+++ b/src/java/org/apache/lucene/util/Parameter.java
@@ -30,16 +30,11 @@ import java.util.Map;
*/
@Deprecated
@SuppressWarnings("serial")
-public abstract class Parameter implements Serializable
-{
+public abstract class Parameter implements Serializable {
static Map allParameters = new HashMap();
private String name;
- private Parameter() {
- // typesafe enum pattern, no public constructor
- }
-
protected Parameter(String name) {
// typesafe enum pattern, no public constructor
this.name = name;
diff --git a/src/java/org/apache/lucene/util/RamUsageEstimator.java b/src/java/org/apache/lucene/util/RamUsageEstimator.java
index 78f19198d7a..2313c409b89 100644
--- a/src/java/org/apache/lucene/util/RamUsageEstimator.java
+++ b/src/java/org/apache/lucene/util/RamUsageEstimator.java
@@ -124,7 +124,7 @@ public final class RamUsageEstimator {
// add to seen
seen.put(obj, null);
- Class clazz = obj.getClass();
+ Class<?> clazz = obj.getClass();
if (clazz.isArray()) {
return sizeOfArray(obj);
}
@@ -167,7 +167,7 @@ public final class RamUsageEstimator {
return 0;
}
long size = arraySize;
- Class arrayElementClazz = obj.getClass().getComponentType();
+ Class<?> arrayElementClazz = obj.getClass().getComponentType();
if (arrayElementClazz.isPrimitive()) {
size += len * memoryModel.getPrimitiveSize(arrayElementClazz);
} else {
diff --git a/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
index 8c4b80b9094..d2cf0b8a6f9 100644
--- a/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
+++ b/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
@@ -20,7 +20,6 @@ import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.store.Directory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.ConcurrentMergeScheduler;
@@ -38,14 +37,6 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
volatile boolean mergeThreadCreated;
volatile boolean excCalled;
- private class MyMergeException extends RuntimeException {
- Directory dir;
- public MyMergeException(Throwable exc, Directory dir) {
- super(exc);
- this.dir = dir;
- }
- }
-
private class MyMergeScheduler extends ConcurrentMergeScheduler {
private class MyMergeThread extends ConcurrentMergeScheduler.MergeThread {
@@ -99,7 +90,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
MyMergeScheduler ms = new MyMergeScheduler();
writer.setMergeScheduler(ms);
writer.setMaxBufferedDocs(2);
- writer.setRAMBufferSizeMB(writer.DISABLE_AUTO_FLUSH);
+ writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
for(int i=0;i<20;i++)
writer.addDocument(doc);
diff --git a/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java b/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
index d5c9594a5d8..a04408ab46d 100644
--- a/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
@@ -1892,7 +1892,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
Iterator expectedIter = expectedOutputTokens.iterator();
- while (expectedIter.hasNext()) {;
+ while (expectedIter.hasNext()) {
assertTermEquals(expectedIter.next(), filter, termAtt);
}
assertFalse(filter.incrementToken());
diff --git a/src/test/org/apache/lucene/analysis/TestAnalyzers.java b/src/test/org/apache/lucene/analysis/TestAnalyzers.java
index 1d7d19a8f99..06b1bb1bba8 100644
--- a/src/test/org/apache/lucene/analysis/TestAnalyzers.java
+++ b/src/test/org/apache/lucene/analysis/TestAnalyzers.java
@@ -107,6 +107,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
// LUCENE-1150: Just a compile time test, to ensure the
// StandardAnalyzer constants remain publicly accessible
+ @SuppressWarnings("unused")
public void _testStandardConstants() {
int x = StandardTokenizer.ALPHANUM;
x = StandardTokenizer.APOSTROPHE;
diff --git a/src/test/org/apache/lucene/analysis/TestCharArrayMap.java b/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
index 5e5578a3dd2..4ad71f3c486 100644
--- a/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
+++ b/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
@@ -76,7 +76,8 @@ public class TestCharArrayMap extends LuceneTestCase {
int n=0;
for (Object o : cs) {
assertTrue(cm.containsKey(o));
- assertTrue(cm.containsKey((char[]) o));
+ char[] co = (char[]) o;
+ assertTrue(cm.containsKey(co, 0, co.length));
n++;
}
assertEquals(hm.size(), n);
diff --git a/src/test/org/apache/lucene/analysis/TestCharArraySet.java b/src/test/org/apache/lucene/analysis/TestCharArraySet.java
index bd46543c839..cbeaf74ff74 100755
--- a/src/test/org/apache/lucene/analysis/TestCharArraySet.java
+++ b/src/test/org/apache/lucene/analysis/TestCharArraySet.java
@@ -327,6 +327,7 @@ public class TestCharArraySet extends LuceneTestCase {
}
}
+ @SuppressWarnings("deprecated")
public void testCopyCharArraySetBWCompat() {
CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
@@ -341,6 +342,7 @@ public class TestCharArraySet extends LuceneTestCase {
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
+ // This should use the deprecated methods, because it checks backwards compatibility.
CharArraySet copy = CharArraySet.copy(setIngoreCase);
CharArraySet copyCaseSens = CharArraySet.copy(setCaseSensitive);
@@ -474,8 +476,9 @@ public class TestCharArraySet extends LuceneTestCase {
for (String stopword : TEST_STOP_WORDS) {
assertFalse(CharArraySet.EMPTY_SET.contains(stopword));
}
+ assertFalse(CharArraySet.EMPTY_SET.contains("foo"));
assertFalse(CharArraySet.EMPTY_SET.contains((Object) "foo"));
- assertFalse(CharArraySet.EMPTY_SET.contains((Object) "foo".toCharArray()));
+ assertFalse(CharArraySet.EMPTY_SET.contains("foo".toCharArray()));
assertFalse(CharArraySet.EMPTY_SET.contains("foo".toCharArray(),0,3));
}
diff --git a/src/test/org/apache/lucene/analysis/TestCharTokenizers.java b/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
index c26880a135a..acf562d0e62 100644
--- a/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
+++ b/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
@@ -172,7 +172,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
return Character.isLetter(c);
}
- @Override
+ @Deprecated @Override
protected boolean isTokenChar(char c) {
return Character.isLetter(c);
}
@@ -183,7 +183,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
super(matchVersion, input);
}
- @Override
+ @Deprecated @Override
protected char normalize(char c) {
return c;
}
@@ -200,7 +200,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
super(matchVersion, input);
}
- @Override
+ @Deprecated @Override
protected char normalize(char c) {
return c;
}
@@ -215,7 +215,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
return Character.isLetter(c);
}
- @Override
+ @Deprecated @Override
protected boolean isTokenChar(char c) {
return Character.isLetter(c);
}
diff --git a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
index 0c8cf1d258a..3986d246456 100644
--- a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
+++ b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
@@ -38,7 +38,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
private IndexSearcher searcher;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory,
diff --git a/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java b/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
index 52e48a53061..7f983214a7d 100644
--- a/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
+++ b/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
@@ -24,7 +24,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
NormalizeCharMap normMap;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
normMap = new NormalizeCharMap();
diff --git a/src/test/org/apache/lucene/analysis/TestToken.java b/src/test/org/apache/lucene/analysis/TestToken.java
index 1552f70d8f7..6d259adc468 100644
--- a/src/test/org/apache/lucene/analysis/TestToken.java
+++ b/src/test/org/apache/lucene/analysis/TestToken.java
@@ -35,7 +35,6 @@ public class TestToken extends LuceneTestCase {
Token t = new Token();
char[] content = "hello".toCharArray();
t.setTermBuffer(content, 0, content.length);
- char[] buf = t.termBuffer();
assertNotSame(t.termBuffer(), content);
assertEquals("hello", t.term());
assertEquals("word", t.type());
diff --git a/src/test/org/apache/lucene/collation/CollationTestBase.java b/src/test/org/apache/lucene/collation/CollationTestBase.java
index 69f9e3afd1f..935234b795c 100644
--- a/src/test/org/apache/lucene/collation/CollationTestBase.java
+++ b/src/test/org/apache/lucene/collation/CollationTestBase.java
@@ -40,9 +40,6 @@ import org.apache.lucene.util.IndexableBinaryStringTools;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
-import java.nio.CharBuffer;
-import java.nio.ByteBuffer;
-
public class CollationTestBase extends LuceneTestCase {
@@ -60,13 +57,11 @@ public class CollationTestBase extends LuceneTestCase {
* @return The encoded collation key for the original String
*/
protected String encodeCollationKey(byte[] keyBits) {
- ByteBuffer begBuf = ByteBuffer.wrap(keyBits);
// Ensure that the backing char[] array is large enough to hold the encoded
// Binary String
- char[] encodedBegArray
- = new char[IndexableBinaryStringTools.getEncodedLength(begBuf)];
- CharBuffer encodedBegBuf = CharBuffer.wrap(encodedBegArray);
- IndexableBinaryStringTools.encode(begBuf, encodedBegBuf);
+ int encodedLength = IndexableBinaryStringTools.getEncodedLength(keyBits, 0, keyBits.length);
+ char[] encodedBegArray = new char[encodedLength];
+ IndexableBinaryStringTools.encode(keyBits, 0, keyBits.length, encodedBegArray, 0, encodedLength);
return new String(encodedBegArray);
}
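
The rewritten helper above is the whole migration in miniature: the ByteBuffer/CharBuffer overloads give way to the array/offset/length overloads shown in the hunk. A usage sketch built only on those two calls:

import org.apache.lucene.util.IndexableBinaryStringTools;

public class BinaryKeySketch {
  public static String encode(byte[] keyBits) {
    int encodedLength =
        IndexableBinaryStringTools.getEncodedLength(keyBits, 0, keyBits.length);
    char[] encoded = new char[encodedLength];
    IndexableBinaryStringTools.encode(keyBits, 0, keyBits.length,
                                      encoded, 0, encodedLength);
    return new String(encoded);
  }

  public static void main(String[] args) {
    System.out.println(encode(new byte[] {1, 2, 3}).length()); // (24+14)/15 + 1 = 3
  }
}
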
diff --git a/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java b/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java
index 401591e5af1..f26ebe3e2bb 100644
--- a/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java
+++ b/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java
@@ -46,16 +46,16 @@ public class TestCollationKeyFilter extends CollationTestBase {
public class TestAnalyzer extends Analyzer {
- private Collator collator;
+ private Collator _collator;
TestAnalyzer(Collator collator) {
- this.collator = collator;
+ _collator = collator;
}
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream result = new KeywordTokenizer(reader);
- result = new CollationKeyFilter(result, collator);
+ result = new CollationKeyFilter(result, _collator);
return result;
}
}
diff --git a/src/test/org/apache/lucene/document/TestBinaryDocument.java b/src/test/org/apache/lucene/document/TestBinaryDocument.java
index 64f77b583a0..759036da441 100644
--- a/src/test/org/apache/lucene/document/TestBinaryDocument.java
+++ b/src/test/org/apache/lucene/document/TestBinaryDocument.java
@@ -36,7 +36,7 @@ public class TestBinaryDocument extends LuceneTestCase
public void testBinaryFieldInIndex()
throws Exception
{
- Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes(), Field.Store.YES);
+ Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes());
Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);
try {
@@ -45,7 +45,6 @@ public class TestBinaryDocument extends LuceneTestCase
fail();
}
catch (IllegalArgumentException iae) {
- ;
}
Document doc = new Document();
@@ -87,8 +86,8 @@ public class TestBinaryDocument extends LuceneTestCase
public void testCompressionTools()
throws Exception
{
- Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()), Field.Store.YES);
- Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed), Field.Store.YES);
+ Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
+ Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed));
Document doc = new Document();
diff --git a/src/test/org/apache/lucene/document/TestDocument.java b/src/test/org/apache/lucene/document/TestDocument.java
index 558a27f7b48..ad7cd25e279 100644
--- a/src/test/org/apache/lucene/document/TestDocument.java
+++ b/src/test/org/apache/lucene/document/TestDocument.java
@@ -42,8 +42,8 @@ public class TestDocument extends LuceneTestCase
{
Document doc = new Document();
Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, Field.Index.NO);
- Fieldable binaryFld = new Field("binary", binaryVal.getBytes(), Field.Store.YES);
- Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes(), Field.Store.YES);
+ Fieldable binaryFld = new Field("binary", binaryVal.getBytes());
+ Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes());
doc.add(stringFld);
doc.add(binaryFld);
@@ -259,8 +259,7 @@ public class TestDocument extends LuceneTestCase
}
public void testFieldSetValueChangeBinary() {
- Field field1 = new Field("field1", new byte[0],
- Field.Store.YES);
+ Field field1 = new Field("field1", new byte[0]);
Field field2 = new Field("field2", "",
Field.Store.YES, Field.Index.ANALYZED);
try {
diff --git a/src/test/org/apache/lucene/index/DocHelper.java b/src/test/org/apache/lucene/index/DocHelper.java
index a21cbc9b840..234c8740dec 100644
--- a/src/test/org/apache/lucene/index/DocHelper.java
+++ b/src/test/org/apache/lucene/index/DocHelper.java
@@ -154,7 +154,7 @@ class DocHelper {
LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes("UTF8");
} catch (UnsupportedEncodingException e) {
}
- lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES, Field.Store.YES);
+ lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
fields[fields.length - 2] = lazyFieldBinary;
LARGE_LAZY_FIELD_TEXT = buffer.toString();
largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
diff --git a/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/src/test/org/apache/lucene/index/TestAtomicUpdate.java
index 7e35462653e..570a1c2b63c 100644
--- a/src/test/org/apache/lucene/index/TestAtomicUpdate.java
+++ b/src/test/org/apache/lucene/index/TestAtomicUpdate.java
@@ -85,8 +85,6 @@ public class TestAtomicUpdate extends LuceneTestCase {
private static class IndexerThread extends TimedThread {
IndexWriter writer;
- public int count;
-
public IndexerThread(IndexWriter writer, TimedThread[] threads) {
super(threads);
this.writer = writer;
diff --git a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 72258d2425a..b174e9532b7 100644
--- a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -78,11 +78,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase
first */
public void unzip(String zipName, String destDirName) throws IOException {
- Enumeration entries;
- ZipFile zipFile;
- zipFile = new ZipFile(zipName + ".zip");
+ ZipFile zipFile = new ZipFile(zipName + ".zip");
- entries = zipFile.entries();
+ Enumeration<? extends ZipEntry> entries = zipFile.entries();
String dirName = fullDir(destDirName);
@@ -92,7 +90,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
fileDir.mkdir();
while (entries.hasMoreElements()) {
- ZipEntry entry = (ZipEntry) entries.nextElement();
+ ZipEntry entry = entries.nextElement();
InputStream in = zipFile.getInputStream(entry);
OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(fileDir, entry.getName())));
diff --git a/src/test/org/apache/lucene/index/TestCompoundFile.java b/src/test/org/apache/lucene/index/TestCompoundFile.java
index 4bbb5fc3a1e..c69c2761a8d 100644
--- a/src/test/org/apache/lucene/index/TestCompoundFile.java
+++ b/src/test/org/apache/lucene/index/TestCompoundFile.java
@@ -55,7 +55,7 @@ public class TestCompoundFile extends LuceneTestCase
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
File file = new File(System.getProperty("tempDir"), "testIndex");
_TestUtil.rmDir(file);
@@ -64,7 +64,7 @@ public class TestCompoundFile extends LuceneTestCase
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
dir.close();
super.tearDown();
}
@@ -329,14 +329,14 @@ public class TestCompoundFile extends LuceneTestCase
IndexInput in = fsdir.openInput(file);
// This read primes the buffer in IndexInput
- byte b = in.readByte();
+ in.readByte();
// Close the file
in.close();
// ERROR: this call should fail, but succeeds because the buffer
// is still filled
- b = in.readByte();
+ in.readByte();
// ERROR: this call should fail, but succeeds for some reason as well
in.seek(1099);
@@ -344,7 +344,7 @@ public class TestCompoundFile extends LuceneTestCase
try {
// OK: this call correctly fails. We are now past the 1024 internal
// buffer, so an actual IO is attempted, which fails
- b = in.readByte();
+ in.readByte();
fail("expected readByte() to throw exception");
} catch (IOException e) {
// expected exception
@@ -587,7 +587,7 @@ public class TestCompoundFile extends LuceneTestCase
// Open two files
try {
- IndexInput e1 = cr.openInput("bogus");
+ cr.openInput("bogus");
fail("File not found");
} catch (IOException e) {
@@ -608,7 +608,7 @@ public class TestCompoundFile extends LuceneTestCase
is.readBytes(b, 0, 10);
try {
- byte test = is.readByte();
+ is.readByte();
fail("Single byte read past end of file");
} catch (IOException e) {
/* success */
diff --git a/src/test/org/apache/lucene/index/TestDirectoryReader.java b/src/test/org/apache/lucene/index/TestDirectoryReader.java
index 137436ac48b..5dd914a440c 100644
--- a/src/test/org/apache/lucene/index/TestDirectoryReader.java
+++ b/src/test/org/apache/lucene/index/TestDirectoryReader.java
@@ -122,13 +122,6 @@ public class TestDirectoryReader extends LuceneTestCase {
assertEquals( 1, reader.numDocs() );
}
-
- public void _testTermVectors() {
- MultiReader reader = new MultiReader(readers);
- assertTrue(reader != null);
- }
-
-
public void testIsCurrent() throws IOException {
RAMDirectory ramDir1=new RAMDirectory();
addDoc(ramDir1, "test foo", true);
diff --git a/src/test/org/apache/lucene/index/TestDoc.java b/src/test/org/apache/lucene/index/TestDoc.java
index c0a7a0435d3..85abe2351d5 100644
--- a/src/test/org/apache/lucene/index/TestDoc.java
+++ b/src/test/org/apache/lucene/index/TestDoc.java
@@ -55,7 +55,7 @@ public class TestDoc extends LuceneTestCase {
* a few text files created in the current working directory.
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
workDir = new File(System.getProperty("tempDir"),"TestDoc");
workDir.mkdirs();
diff --git a/src/test/org/apache/lucene/index/TestFieldsReader.java b/src/test/org/apache/lucene/index/TestFieldsReader.java
index 55a2bb89f42..2e3c11dfd95 100644
--- a/src/test/org/apache/lucene/index/TestFieldsReader.java
+++ b/src/test/org/apache/lucene/index/TestFieldsReader.java
@@ -60,7 +60,6 @@ public class TestFieldsReader extends LuceneTestCase {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Document doc = reader.doc(0, null);
assertTrue(doc != null);
@@ -98,7 +97,6 @@ public class TestFieldsReader extends LuceneTestCase {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Set loadFieldNames = new HashSet();
loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
@@ -148,7 +146,6 @@ public class TestFieldsReader extends LuceneTestCase {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Set loadFieldNames = new HashSet();
loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
@@ -177,7 +174,6 @@ public class TestFieldsReader extends LuceneTestCase {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
LoadFirstFieldSelector fieldSelector = new LoadFirstFieldSelector();
Document doc = reader.doc(0, fieldSelector);
@@ -227,13 +223,13 @@ public class TestFieldsReader extends LuceneTestCase {
for (int i = 0; i < length; i++) {
reader = new FieldsReader(tmpDir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Document doc;
doc = reader.doc(0, null);//Load all of them
assertTrue("doc is null and it shouldn't be", doc != null);
Fieldable field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY);
+ assertTrue("field is null and it shouldn't be", field != null);
assertTrue("field is lazy", field.isLazy() == false);
String value;
long start;
@@ -243,7 +239,6 @@ public class TestFieldsReader extends LuceneTestCase {
value = field.stringValue();
finish = System.currentTimeMillis();
assertTrue("value is null and it shouldn't be", value != null);
- assertTrue("field is null and it shouldn't be", field != null);
regularTime += (finish - start);
reader.close();
reader = null;
diff --git a/src/test/org/apache/lucene/index/TestIndexReader.java b/src/test/org/apache/lucene/index/TestIndexReader.java
index db210477ba0..852aa9b681b 100644
--- a/src/test/org/apache/lucene/index/TestIndexReader.java
+++ b/src/test/org/apache/lucene/index/TestIndexReader.java
@@ -366,7 +366,7 @@ public class TestIndexReader extends LuceneTestCase
writer.close();
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("bin1", bin, Field.Store.YES));
+ doc.add(new Field("bin1", bin));
doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -511,11 +511,8 @@ public class TestIndexReader extends LuceneTestCase
// Make sure you can set norms & commit even if a reader
// is open against the index:
- public void testWritingNorms() throws IOException
- {
+ public void testWritingNorms() throws IOException {
String tempDir = "target/test";
- if (tempDir == null)
- throw new IOException("tempDir undefined, cannot run test");
File indexDir = new File(tempDir, "lucenetestnormwriter");
Directory dir = FSDirectory.open(indexDir);
diff --git a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
index 276e7579e28..ae34f3533da 100644
--- a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
+++ b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
@@ -247,11 +247,6 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
iw.close();
}
- private void modifyNormsForF1(Directory dir) throws IOException {
- IndexReader ir = IndexReader.open(dir, false);
- modifyNormsForF1(ir);
- }
-
private void modifyNormsForF1(IndexReader ir) throws IOException {
int n = ir.maxDoc();
// System.out.println("modifyNormsForF1 maxDoc: "+n);
diff --git a/src/test/org/apache/lucene/index/TestIndexWriter.java b/src/test/org/apache/lucene/index/TestIndexWriter.java
index 8d199655639..cd04f693ff7 100644
--- a/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -3493,14 +3493,14 @@ public class TestIndexWriter extends LuceneTestCase {
final TermAttribute termAtt = addAttribute(TermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- final Iterator tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
+ final Iterator<String> terms = Arrays.asList("a","b","c").iterator();
boolean first = true;
@Override
public boolean incrementToken() {
- if (!tokens.hasNext()) return false;
+ if (!terms.hasNext()) return false;
clearAttributes();
- termAtt.setTermBuffer( tokens.next());
+ termAtt.setTermBuffer( terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
@@ -3784,7 +3784,7 @@ public class TestIndexWriter extends LuceneTestCase {
c.joinThreads();
- assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*c.NUM_THREADS*c.NUM_INIT_DOCS, c.writer2.numDocs());
+ assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS, c.writer2.numDocs());
c.close(true);
@@ -3793,7 +3793,7 @@ public class TestIndexWriter extends LuceneTestCase {
_TestUtil.checkIndex(c.dir2);
IndexReader reader = IndexReader.open(c.dir2, true);
- assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*c.NUM_THREADS*c.NUM_INIT_DOCS, reader.numDocs());
+ assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS, reader.numDocs());
reader.close();
c.closeDir();
@@ -3969,7 +3969,7 @@ public class TestIndexWriter extends LuceneTestCase {
b[i] = (byte) (i+77);
Document doc = new Document();
- Field f = new Field("binary", b, 10, 17, Field.Store.YES);
+ Field f = new Field("binary", b, 10, 17);
byte[] bx = f.getBinaryValue();
assertTrue(bx != null);
assertEquals(50, bx.length);
@@ -4516,7 +4516,7 @@ public class TestIndexWriter extends LuceneTestCase {
b[i] = (byte) (i+77);
Document doc = new Document();
- Field f = new Field("binary", b, 10, 17, Field.Store.YES);
+ Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field1")));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field2")));
@@ -4688,7 +4688,7 @@ public class TestIndexWriter extends LuceneTestCase {
for(int iter=0;iter<2;iter++) {
Directory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
diff --git a/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java b/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
index 86aac34b8a7..c139792e01a 100644
--- a/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
@@ -34,7 +34,7 @@ public class TestIndexWriterLockRelease extends LuceneTestCase {
private java.io.File __test_dir;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
if (this.__test_dir == null) {
String tmp_dir = System.getProperty("java.io.tmpdir", "tmp");
@@ -52,8 +52,7 @@ public class TestIndexWriterLockRelease extends LuceneTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
if (this.__test_dir != null) {
File[] files = this.__test_dir.listFiles();
@@ -69,16 +68,16 @@ public class TestIndexWriterLockRelease extends LuceneTestCase {
throw new IOException("unable to remove test directory \"" + this.__test_dir.getPath() + "\" (please remove by hand)");
}
}
+ super.tearDown();
}
public void testIndexWriterLockRelease() throws IOException {
- IndexWriter im;
FSDirectory dir = FSDirectory.open(this.__test_dir);
try {
- im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
} catch (FileNotFoundException e) {
try {
- im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
} catch (FileNotFoundException e1) {
}
} finally {
diff --git a/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 4ee1b93ec95..4ada6871424 100644
--- a/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -20,7 +20,6 @@ import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.LinkedList;
import java.util.List;
import java.util.Random;
@@ -302,70 +301,6 @@ public class TestIndexWriterReader extends LuceneTestCase {
mainDir.close();
}
- private class DeleteThreads {
- final static int NUM_THREADS = 5;
- final Thread[] threads = new Thread[NUM_THREADS];
- IndexWriter mainWriter;
- List deletedTerms = new ArrayList();
- LinkedList toDeleteTerms = new LinkedList();
- Random random;
- final List failures = new ArrayList();
-
- public DeleteThreads(IndexWriter mainWriter) throws IOException {
- this.mainWriter = mainWriter;
- IndexReader reader = mainWriter.getReader();
- int maxDoc = reader.maxDoc();
- random = newRandom();
- int iter = random.nextInt(maxDoc);
- for (int x=0; x < iter; x++) {
- int doc = random.nextInt(iter);
- String id = reader.document(doc).get("id");
- toDeleteTerms.add(new Term("id", id));
- }
- }
-
- Term getDeleteTerm() {
- synchronized (toDeleteTerms) {
- return toDeleteTerms.removeFirst();
- }
- }
-
- void launchThreads(final int numIter) {
- for (int i = 0; i < NUM_THREADS; i++) {
- threads[i] = new Thread() {
- @Override
- public void run() {
- try {
- Term term = getDeleteTerm();
- mainWriter.deleteDocuments(term);
- synchronized (deletedTerms) {
- deletedTerms.add(term);
- }
- } catch (Throwable t) {
- handle(t);
- }
- }
- };
- }
- }
-
- void handle(Throwable t) {
- t.printStackTrace(System.out);
- synchronized (failures) {
- failures.add(t);
- }
- }
-
- void joinThreads() {
- for (int i = 0; i < NUM_THREADS; i++)
- try {
- threads[i].join();
- } catch (InterruptedException ie) {
- throw new ThreadInterruptedException(ie);
- }
- }
- }
-
private class AddDirectoriesThreads {
Directory addDir;
final static int NUM_THREADS = 5;
@@ -558,16 +493,15 @@ public class TestIndexWriterReader extends LuceneTestCase {
return doc;
}
- /**
+ /*
* Delete a document by term and return the doc id
*
- * @return
- *
* public static int deleteDocument(Term term, IndexWriter writer) throws
* IOException { IndexReader reader = writer.getReader(); TermDocs td =
* reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
* //} //writer.deleteDocuments(term); td.close(); return doc; }
*/
+
public static void createIndex(Directory dir1, String indexName,
boolean multiSegment) throws IOException {
IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
diff --git a/src/test/org/apache/lucene/index/TestMultiReader.java b/src/test/org/apache/lucene/index/TestMultiReader.java
index 6fd0b49e36f..158c7f068dd 100644
--- a/src/test/org/apache/lucene/index/TestMultiReader.java
+++ b/src/test/org/apache/lucene/index/TestMultiReader.java
@@ -40,7 +40,6 @@ public class TestMultiReader extends TestDirectoryReader {
assertTrue(dir != null);
assertTrue(sis != null);
- assertTrue(reader != null);
return reader;
}
diff --git a/src/test/org/apache/lucene/index/TestNorms.java b/src/test/org/apache/lucene/index/TestNorms.java
index 17d789117f1..6b8e762628d 100755
--- a/src/test/org/apache/lucene/index/TestNorms.java
+++ b/src/test/org/apache/lucene/index/TestNorms.java
@@ -17,7 +17,8 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.ArrayList;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
@@ -29,10 +30,7 @@ import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Test that norms info is preserved during index life - including
@@ -158,8 +156,8 @@ public class TestNorms extends LuceneTestCase {
int n = ir.maxDoc();
for (int i = 0; i < n; i+=3) { // modify for every third doc
int k = (i*3) % modifiedNorms.size();
- float origNorm = ((Float)modifiedNorms.get(i)).floatValue();
- float newNorm = ((Float)modifiedNorms.get(k)).floatValue();
+ float origNorm = modifiedNorms.get(i).floatValue();
+ float newNorm = modifiedNorms.get(k).floatValue();
//System.out.println("Modifying: for "+i+" from "+origNorm+" to "+newNorm);
//System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
modifiedNorms.set(i, Float.valueOf(newNorm));
diff --git a/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/src/test/org/apache/lucene/index/TestParallelTermEnum.java
index a793b065566..d6cf47e8586 100755
--- a/src/test/org/apache/lucene/index/TestParallelTermEnum.java
+++ b/src/test/org/apache/lucene/index/TestParallelTermEnum.java
@@ -67,10 +67,9 @@ public class TestParallelTermEnum extends LuceneTestCase {
@Override
protected void tearDown() throws Exception {
- super.tearDown();
-
ir1.close();
ir2.close();
+ super.tearDown();
}
public void test1() throws IOException {
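
Many hunks in this patch apply the same fixture discipline seen here: setUp() delegates to super first, tearDown() releases the test's own resources and calls super.tearDown() last, and both stay protected as declared by TestCase. A minimal sketch of the pattern (names are illustrative, not from the patch):

import java.io.Reader;
import java.io.StringReader;
import junit.framework.TestCase;

public class FixtureOrderingSketch extends TestCase {
  private Reader resource;

  @Override
  protected void setUp() throws Exception {
    super.setUp();                       // base-class setup runs first
    resource = new StringReader("fixture");
  }

  @Override
  protected void tearDown() throws Exception {
    resource.close();                    // release own resources first...
    super.tearDown();                    // ...then let the base class finish
  }

  public void testFixtureIsOpen() throws Exception {
    assertEquals('f', resource.read());
  }
}
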
diff --git a/src/test/org/apache/lucene/index/TestPayloads.java b/src/test/org/apache/lucene/index/TestPayloads.java
index 25a04f20ea8..c6383cc2b56 100644
--- a/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/src/test/org/apache/lucene/index/TestPayloads.java
@@ -445,11 +445,8 @@ public class TestPayloads extends LuceneTestCase {
boolean hasNext = input.incrementToken();
if (hasNext) {
if (offset + length <= data.length) {
- Payload p = null;
- if (p == null) {
- p = new Payload();
- payloadAtt.setPayload(p);
- }
+ Payload p = new Payload();
+ payloadAtt.setPayload(p);
p.setData(data, offset, length);
offset += length;
} else {
diff --git a/src/test/org/apache/lucene/index/TestSegmentReader.java b/src/test/org/apache/lucene/index/TestSegmentReader.java
index fff8d9cba15..901791c1257 100644
--- a/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -134,8 +134,8 @@ public class TestSegmentReader extends LuceneTestCase {
TermPositions positions = reader.termPositions();
- positions.seek(new Term(DocHelper.TEXT_FIELD_1_KEY, "field"));
assertTrue(positions != null);
+ positions.seek(new Term(DocHelper.TEXT_FIELD_1_KEY, "field"));
assertTrue(positions.doc() == 0);
assertTrue(positions.nextPosition() >= 0);
}
diff --git a/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
index 90baf959e73..067103d1493 100644
--- a/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
+++ b/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -57,7 +57,6 @@ public class TestSegmentTermDocs extends LuceneTestCase {
assertTrue(reader != null);
assertEquals(indexDivisor, reader.getTermInfosIndexDivisor());
SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
- assertTrue(segTermDocs != null);
segTermDocs.seek(new Term(DocHelper.TEXT_FIELD_2_KEY, "field"));
if (segTermDocs.next() == true)
{
@@ -79,7 +78,6 @@ public class TestSegmentTermDocs extends LuceneTestCase {
SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
assertTrue(reader != null);
SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
- assertTrue(segTermDocs != null);
segTermDocs.seek(new Term("textField2", "bad"));
assertTrue(segTermDocs.next() == false);
reader.close();
@@ -89,7 +87,6 @@ public class TestSegmentTermDocs extends LuceneTestCase {
SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
assertTrue(reader != null);
SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
- assertTrue(segTermDocs != null);
segTermDocs.seek(new Term("junk", "bad"));
assertTrue(segTermDocs.next() == false);
reader.close();
diff --git a/src/test/org/apache/lucene/index/TestStressIndexing.java b/src/test/org/apache/lucene/index/TestStressIndexing.java
index 8d6083a5c56..978bfb76346 100644
--- a/src/test/org/apache/lucene/index/TestStressIndexing.java
+++ b/src/test/org/apache/lucene/index/TestStressIndexing.java
@@ -70,7 +70,6 @@ public class TestStressIndexing extends LuceneTestCase {
private class IndexerThread extends TimedThread {
IndexWriter writer;
- public int count;
int nextID;
public IndexerThread(IndexWriter writer, TimedThread[] threads) {
diff --git a/src/test/org/apache/lucene/index/TestStressIndexing2.java b/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 2d1209cfcd9..ba0096f8007 100644
--- a/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -26,7 +26,7 @@ import org.apache.lucene.search.TermQuery;
import java.util.*;
import java.io.IOException;
-import junit.framework.TestCase;
+import junit.framework.Assert;
public class TestStressIndexing2 extends LuceneTestCase {
static int maxFields=4;
@@ -644,7 +644,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
} catch (Throwable e) {
e.printStackTrace();
- TestCase.fail(e.toString());
+ Assert.fail(e.toString());
}
synchronized (this) {
diff --git a/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index 9bbdef5c0b7..1fd2188ba17 100644
--- a/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -166,7 +166,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
public void testReader() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
for (int j = 0; j < 5; j++) {
TermFreqVector vector = reader.get(j, testFields[0]);
assertTrue(vector != null);
@@ -183,7 +182,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
public void testPositionReader() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
TermPositionVector vector;
String[] terms;
vector = (TermPositionVector) reader.get(0, testFields[0]);
@@ -226,7 +224,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
public void testOffsetReader() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
TermPositionVector vector = (TermPositionVector) reader.get(0, testFields[0]);
assertTrue(vector != null);
String[] terms = vector.getTerms();
@@ -255,7 +252,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
public void testMapper() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
reader.get(0, mapper);
SortedSet set = mapper.getTermVectorEntrySet();
@@ -376,7 +372,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
public void testBadParams() {
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
//Bad document number, good field number
reader.get(50, testFields[0]);
fail();
@@ -385,7 +380,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
}
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
//Bad document number, no field
reader.get(50);
fail();
@@ -394,7 +388,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
}
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
//good document number, bad field number
TermFreqVector vector = reader.get(0, "f50");
assertTrue(vector == null);
diff --git a/src/test/org/apache/lucene/queryParser/TestQueryParser.java b/src/test/org/apache/lucene/queryParser/TestQueryParser.java
index 8dd6e94bfff..9dc3889f956 100644
--- a/src/test/org/apache/lucene/queryParser/TestQueryParser.java
+++ b/src/test/org/apache/lucene/queryParser/TestQueryParser.java
@@ -150,7 +150,7 @@ public class TestQueryParser extends LocalizedTestCase {
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
@@ -1017,9 +1017,9 @@ public class TestQueryParser extends LocalizedTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
+ super.tearDown();
}
// LUCENE-2002: make sure defaults for StandardAnalyzer's
diff --git a/src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java b/src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java
index 15607400fab..e0b78c2df45 100644
--- a/src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java
+++ b/src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java
@@ -20,8 +20,7 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.WeakHashMap;
-import junit.framework.TestCase;
-
+import junit.framework.Assert;
import org.apache.lucene.index.IndexReader;
/**
@@ -51,9 +50,9 @@ public class CachingWrapperFilterHelper extends CachingWrapperFilter {
synchronized (cache) { // check cache
DocIdSet cached = cache.get(reader);
if (shouldHaveCache) {
- TestCase.assertNotNull("Cache should have data ", cached);
+ Assert.assertNotNull("Cache should have data ", cached);
} else {
- TestCase.assertNull("Cache should be null " + cached , cached);
+ Assert.assertNull("Cache should be null " + cached , cached);
}
if (cached != null) {
return cached;
@@ -77,7 +76,7 @@ public class CachingWrapperFilterHelper extends CachingWrapperFilter {
@Override
public boolean equals(Object o) {
if (!(o instanceof CachingWrapperFilterHelper)) return false;
- return this.filter.equals((CachingWrapperFilterHelper)o);
+ return this.filter.equals(o);
}
@Override
diff --git a/src/test/org/apache/lucene/search/CheckHits.java b/src/test/org/apache/lucene/search/CheckHits.java
index bd2581d991a..75fd4e21100 100644
--- a/src/test/org/apache/lucene/search/CheckHits.java
+++ b/src/test/org/apache/lucene/search/CheckHits.java
@@ -76,7 +76,7 @@ public class CheckHits {
* @param searcher the searcher to test the query against
* @param defaultFieldName used for displaying the query in assertion messages
* @param results a list of documentIds that must match the query
- * @see Searcher#search(Query,HitCollector)
+ * @see Searcher#search(Query,Collector)
* @see #checkHits
*/
public static void checkHitCollector(Query query, String defaultFieldName,
@@ -149,7 +149,7 @@ public class CheckHits {
* @param searcher the searcher to test the query against
* @param defaultFieldName used for displaying the query in assertion messages
* @param results a list of documentIds that must match the query
- * @see Searcher#search(Query)
+ * @see Searcher#search(Query, int)
* @see #checkHitCollector
*/
public static void checkHits(
@@ -159,7 +159,7 @@ public class CheckHits {
int[] results)
throws IOException {
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
Set correct = new TreeSet();
for (int i = 0; i < results.length; i++) {
diff --git a/src/test/org/apache/lucene/search/JustCompileSearch.java b/src/test/org/apache/lucene/search/JustCompileSearch.java
index 5055165e35e..1d19029ef52 100644
--- a/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -245,7 +245,7 @@ final class JustCompileSearch {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
diff --git a/src/test/org/apache/lucene/search/TestBoolean2.java b/src/test/org/apache/lucene/search/TestBoolean2.java
index b131654b558..35eb209e915 100644
--- a/src/test/org/apache/lucene/search/TestBoolean2.java
+++ b/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -47,7 +47,7 @@ public class TestBoolean2 extends LuceneTestCase {
private int mulFactor;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -93,9 +93,10 @@ public class TestBoolean2 extends LuceneTestCase {
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
reader.close();
dir2.close();
+ super.tearDown();
}
private String[] docFields = {
diff --git a/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
index 308d68de167..2c593ff54b0 100644
--- a/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
+++ b/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
@@ -18,7 +18,6 @@ package org.apache.lucene.search;
*/
-import junit.framework.TestCase;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -42,7 +41,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
public IndexSearcher s;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
@@ -338,7 +337,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
// The constrained query
// should be a superset to the unconstrained query.
if (top2.totalHits > top1.totalHits) {
- TestCase.fail("Constrained results not a subset:\n"
+ fail("Constrained results not a subset:\n"
+ CheckHits.topdocsString(top1,0,0)
+ CheckHits.topdocsString(top2,0,0)
+ "for query:" + q2.toString());
@@ -355,7 +354,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
float otherScore = top1.scoreDocs[other].score;
// check if scores match
if (Math.abs(otherScore-score)>1.0e-6f) {
- TestCase.fail("Doc " + id + " scores don't match\n"
+ fail("Doc " + id + " scores don't match\n"
+ CheckHits.topdocsString(top1,0,0)
+ CheckHits.topdocsString(top2,0,0)
+ "for query:" + q2.toString());
@@ -364,7 +363,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
}
// check if subset
- if (!found) TestCase.fail("Doc " + id + " not found\n"
+ if (!found) fail("Doc " + id + " not found\n"
+ CheckHits.topdocsString(top1,0,0)
+ CheckHits.topdocsString(top2,0,0)
+ "for query:" + q2.toString());
diff --git a/src/test/org/apache/lucene/search/TestComplexExplanations.java b/src/test/org/apache/lucene/search/TestComplexExplanations.java
index da1d75668de..46ae50c0fdb 100644
--- a/src/test/org/apache/lucene/search/TestComplexExplanations.java
+++ b/src/test/org/apache/lucene/search/TestComplexExplanations.java
@@ -32,7 +32,7 @@ public class TestComplexExplanations extends TestExplanations {
* nice with boosts of 0.0
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
searcher.setSimilarity(createQnorm1Similarity());
}
diff --git a/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
index 4aabcaa4b43..82a962cbfbc 100644
--- a/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
+++ b/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
@@ -93,7 +93,7 @@ implements Serializable {
* Create index and query for test cases.
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
index = getIndex();
query = new TermQuery( new Term("content", "test"));
diff --git a/src/test/org/apache/lucene/search/TestDateSort.java b/src/test/org/apache/lucene/search/TestDateSort.java
index f062d95ebf9..98e7e0a9e48 100644
--- a/src/test/org/apache/lucene/search/TestDateSort.java
+++ b/src/test/org/apache/lucene/search/TestDateSort.java
@@ -46,7 +46,7 @@ public class TestDateSort extends LuceneTestCase {
private static Directory directory;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
// Create an index writer.
directory = new RAMDirectory();
diff --git a/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index 9df48c2b03b..0ff804227ef 100644
--- a/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -74,7 +74,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{
public IndexSearcher s;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
index = new RAMDirectory();
diff --git a/src/test/org/apache/lucene/search/TestDocIdSet.java b/src/test/org/apache/lucene/search/TestDocIdSet.java
index 2a6a79bd21c..4610d5699ca 100644
--- a/src/test/org/apache/lucene/search/TestDocIdSet.java
+++ b/src/test/org/apache/lucene/search/TestDocIdSet.java
@@ -35,7 +35,6 @@ import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
public class TestDocIdSet extends LuceneTestCase {
public void testFilteredDocIdSet() throws Exception {
@@ -95,8 +94,8 @@ public class TestDocIdSet extends LuceneTestCase {
int[] answer = new int[]{4,6,8};
boolean same = Arrays.equals(answer, docs);
if (!same) {
- System.out.println("answer: "+_TestUtil.arrayToString(answer));
- System.out.println("gotten: "+_TestUtil.arrayToString(docs));
+ System.out.println("answer: " + Arrays.toString(answer));
+ System.out.println("gotten: " + Arrays.toString(docs));
fail();
}
}
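java.util.Arrays.toString produces the same human-readable dump the removed _TestUtil helper did, with no Lucene dependency, which is why the import can go away too. A small self-contained illustration:

    import java.util.Arrays;

    public class ArrayToStringDemo {
      public static void main(String[] args) {
        int[] answer = {4, 6, 8};
        // prints: answer: [4, 6, 8]
        System.out.println("answer: " + Arrays.toString(answer));
      }
    }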
diff --git a/src/test/org/apache/lucene/search/TestElevationComparator.java b/src/test/org/apache/lucene/search/TestElevationComparator.java
index 45a1a931c3a..905e21d4b16 100644
--- a/src/test/org/apache/lucene/search/TestElevationComparator.java
+++ b/src/test/org/apache/lucene/search/TestElevationComparator.java
@@ -21,6 +21,7 @@ import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.*;
+import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.store.*;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
@@ -70,7 +71,7 @@ public class TestElevationComparator extends LuceneTestCase {
new SortField(null, SortField.SCORE, reversed)
);
- TopDocsCollector topCollector = TopFieldCollector.create(sort, 50, false, true, true, true);
+ TopDocsCollector<Entry> topCollector = TopFieldCollector.create(sort, 50, false, true, true, true);
searcher.search(newq, null, topCollector);
TopDocs topDocs = topCollector.topDocs(0, 10);
@@ -171,7 +172,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(values[slot]);
}
};
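Parameterizing the comparator's value(int) return as Comparable<?> rather than the raw Comparable, and the collector as TopDocsCollector<Entry>, is what removes the raw-type warnings here; the FieldValueHitQueue.Entry import added above exists for exactly that. A reduced sketch of the wildcard-return fix, with stand-in shapes rather than the real Lucene FieldComparator API:

    // Stand-in shapes, not the real Lucene FieldComparator API.
    abstract class SlotComparator {
      // a raw "Comparable" return type compiles, but draws a raw-type warning
      abstract Comparable<?> value(int slot);
    }

    public class IntSlotComparator extends SlotComparator {
      private final int[] values = new int[16];

      @Override
      Comparable<?> value(int slot) {
        return Integer.valueOf(values[slot]); // Integer implements Comparable<Integer>
      }
    }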
diff --git a/src/test/org/apache/lucene/search/TestExplanations.java b/src/test/org/apache/lucene/search/TestExplanations.java
index 8771e214664..3b579af598d 100644
--- a/src/test/org/apache/lucene/search/TestExplanations.java
+++ b/src/test/org/apache/lucene/search/TestExplanations.java
@@ -54,13 +54,13 @@ public class TestExplanations extends LuceneTestCase {
new QueryParser(TEST_VERSION_CURRENT, FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
searcher.close();
+ super.tearDown();
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
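Two recurring fixture fixes meet in this hunk: setUp/tearDown drop to protected, matching their declarations in junit.framework.TestCase, and super.tearDown() moves after the subclass's own cleanup, mirroring the rule that super.setUp() runs first. A schematic version with a placeholder resource:

    import junit.framework.TestCase;

    public class FixtureOrderingTest extends TestCase {
      private java.io.Closeable resource; // placeholder for searcher/directory/etc.

      @Override
      protected void setUp() throws Exception {
        super.setUp();             // base-class setup runs first
        resource = openResource(); // hypothetical helper
      }

      @Override
      protected void tearDown() throws Exception {
        resource.close();          // subclass cleanup first
        super.tearDown();          // base-class cleanup last
      }

      private java.io.Closeable openResource() {
        return new java.io.ByteArrayInputStream(new byte[0]);
      }
    }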
diff --git a/src/test/org/apache/lucene/search/TestFieldCache.java b/src/test/org/apache/lucene/search/TestFieldCache.java
index 634384cce35..663adc3c20e 100644
--- a/src/test/org/apache/lucene/search/TestFieldCache.java
+++ b/src/test/org/apache/lucene/search/TestFieldCache.java
@@ -65,8 +65,8 @@ public class TestFieldCache extends LuceneTestCase {
FieldCache cache = FieldCache.DEFAULT;
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
cache.setInfoStream(new PrintStream(bos));
- double [] doubles = cache.getDoubles(reader, "theDouble");
- float [] floats = cache.getFloats(reader, "theDouble");
+ cache.getDoubles(reader, "theDouble");
+ cache.getFloats(reader, "theDouble");
assertTrue(bos.toString().indexOf("WARNING") != -1);
} finally {
FieldCache.DEFAULT.purgeAllCaches();
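The deleted locals were never read; only the calls matter, because the test asserts on their side effect (the cache logging a WARNING when one field is loaded as two numeric types). A generic sketch of keeping a call for its side effect while discarding the result, with a made-up API:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class SideEffectDemo {
      // made-up stand-in for FieldCache.getDoubles: returns data, logs as a side effect
      static double[] loadField(PrintStream log, String field) {
        log.println("WARNING: field '" + field + "' requested with conflicting types");
        return new double[0];
      }

      public static void main(String[] args) {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        PrintStream log = new PrintStream(bos);
        loadField(log, "theDouble"); // return value intentionally discarded
        if (bos.toString().indexOf("WARNING") == -1) {
          throw new AssertionError("expected a WARNING in the log");
        }
      }
    }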
diff --git a/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
index 81da85dd600..02ce8dc4adc 100644
--- a/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
+++ b/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
@@ -479,7 +479,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
int numDocs = reader.numDocs();
Float minIdO = Float.valueOf(minId + .5f);
- Float medIdO = Float.valueOf(minIdO.floatValue() + ((float) (maxId-minId))/2.0f);
+ Float medIdO = Float.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0f);
ScoreDoc[] result;
Query q = new TermQuery(new Term("body","body"));
@@ -507,7 +507,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
int numDocs = reader.numDocs();
Double minIdO = Double.valueOf(minId + .5);
- Double medIdO = Double.valueOf(minIdO.floatValue() + ((double) (maxId-minId))/2.0);
+ Double medIdO = Double.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0);
ScoreDoc[] result;
Query q = new TermQuery(new Term("body","body"));
diff --git a/src/test/org/apache/lucene/search/TestFilteredQuery.java b/src/test/org/apache/lucene/search/TestFilteredQuery.java
index fe5555efc6a..2aabbfa7d74 100644
--- a/src/test/org/apache/lucene/search/TestFilteredQuery.java
+++ b/src/test/org/apache/lucene/search/TestFilteredQuery.java
@@ -45,7 +45,7 @@ public class TestFilteredQuery extends LuceneTestCase {
private Filter filter;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -92,8 +92,7 @@ public class TestFilteredQuery extends LuceneTestCase {
}
@Override
- public void tearDown()
- throws Exception {
+ protected void tearDown() throws Exception {
searcher.close();
directory.close();
super.tearDown();
diff --git a/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
index 30abf219c57..b5f4e5575c7 100644
--- a/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
+++ b/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
@@ -56,7 +56,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
diff --git a/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
index 12cad57deed..7846a119b50 100644
--- a/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
+++ b/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
@@ -38,7 +38,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase {
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
IndexWriter writer
= new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
diff --git a/src/test/org/apache/lucene/search/TestPhraseQuery.java b/src/test/org/apache/lucene/search/TestPhraseQuery.java
index d89dcca2186..b7738e6e0ef 100644
--- a/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -86,10 +86,10 @@ public class TestPhraseQuery extends LuceneTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
searcher.close();
directory.close();
+ super.tearDown();
}
public void testNotCloseEnough() throws Exception {
@@ -474,7 +474,7 @@ public class TestPhraseQuery extends LuceneTestCase {
query.add(new Term("palindrome", "three"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score2 = hits[0].score;
+ //float score2 = hits[0].score;
//System.out.println("palindrome: two three: "+score2);
QueryUtils.check(query,searcher);
@@ -488,7 +488,7 @@ public class TestPhraseQuery extends LuceneTestCase {
query.add(new Term("palindrome", "two"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score3 = hits[0].score;
+ //float score3 = hits[0].score;
//System.out.println("palindrome: three two: "+score3);
QueryUtils.check(query,searcher);
@@ -536,7 +536,7 @@ public class TestPhraseQuery extends LuceneTestCase {
query.add(new Term("palindrome", "three"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score2 = hits[0].score;
+ //float score2 = hits[0].score;
//System.out.println("palindrome: one two three: "+score2);
QueryUtils.check(query,searcher);
@@ -551,7 +551,7 @@ public class TestPhraseQuery extends LuceneTestCase {
query.add(new Term("palindrome", "one"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score3 = hits[0].score;
+ //float score3 = hits[0].score;
//System.out.println("palindrome: three two one: "+score3);
QueryUtils.check(query,searcher);
diff --git a/src/test/org/apache/lucene/search/TestQueryTermVector.java b/src/test/org/apache/lucene/search/TestQueryTermVector.java
index 74523a89db3..9d31b37fb9d 100644
--- a/src/test/org/apache/lucene/search/TestQueryTermVector.java
+++ b/src/test/org/apache/lucene/search/TestQueryTermVector.java
@@ -33,7 +33,6 @@ public class TestQueryTermVector extends LuceneTestCase {
String [] gold = {"again", "bar", "foo", "go"};
int [] goldFreqs = {1, 2, 3, 3};
QueryTermVector result = new QueryTermVector(queryTerm);
- assertTrue(result != null);
String [] terms = result.getTerms();
assertTrue(terms.length == 4);
int [] freq = result.getTermFrequencies();
@@ -43,7 +42,6 @@ public class TestQueryTermVector extends LuceneTestCase {
assertTrue(result.getTerms().length == 0);
result = new QueryTermVector("foo bar foo again foo bar go go go", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
- assertTrue(result != null);
terms = result.getTerms();
assertTrue(terms.length == 4);
freq = result.getTermFrequencies();
diff --git a/src/test/org/apache/lucene/search/TestScorerPerf.java b/src/test/org/apache/lucene/search/TestScorerPerf.java
index 38980170203..1a830b6f019 100755
--- a/src/test/org/apache/lucene/search/TestScorerPerf.java
+++ b/src/test/org/apache/lucene/search/TestScorerPerf.java
@@ -143,7 +143,7 @@ public class TestScorerPerf extends LuceneTestCase {
@Override
public DocIdSet getDocIdSet(IndexReader reader) {
return new DocIdBitSet(rnd);
- };
+ }
});
bq.add(q, BooleanClause.Occur.MUST);
if (validate) {
diff --git a/src/test/org/apache/lucene/search/TestSort.java b/src/test/org/apache/lucene/search/TestSort.java
index 71d4fc08bea..af2c9273207 100644
--- a/src/test/org/apache/lucene/search/TestSort.java
+++ b/src/test/org/apache/lucene/search/TestSort.java
@@ -37,6 +37,7 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.DocIdBitSet;
@@ -207,7 +208,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
full = getFullIndex();
searchX = getXIndex();
@@ -446,7 +447,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(slotValues[slot]);
}
}
@@ -747,7 +748,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
Sort[] sort = new Sort[] { new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, false,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, false,
false, false, true);
full.search(q, tdc);
@@ -766,7 +767,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, false,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, false,
false, true);
full.search(q, tdc);
@@ -786,7 +787,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, true,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true,
false, true);
full.search(q, tdc);
@@ -806,7 +807,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, true,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true,
true, true);
full.search(q, tdc);
@@ -854,7 +855,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
bq.setMinimumNumberShouldMatch(1);
for (int i = 0; i < sort.length; i++) {
for (int j = 0; j < tfcOptions.length; j++) {
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10,
tfcOptions[j][0], tfcOptions[j][1], tfcOptions[j][2], false);
assertTrue(tdc.getClass().getName().endsWith("$"+actualTFCClasses[j]));
@@ -873,7 +874,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
// Two Sort criteria to instantiate the multi/single comparators.
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, true, true, true);
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true, true, true);
TopDocs td = tdc.topDocs();
assertEquals(0, td.totalHits);
assertTrue(Float.isNaN(td.getMaxScore()));
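As in TestElevationComparator, the newly imported FieldValueHitQueue.Entry lets every raw TopDocsCollector declaration in this file be parameterized as TopDocsCollector<Entry>. A reduced sketch of the raw-versus-parameterized difference, using stand-ins rather than the Lucene classes:

    // Stand-ins for the Lucene types, shrunk to show only the raw-type fix.
    class Entry {}
    class TopDocsCollector<T> {}
    class TopFieldCollector {
      static TopDocsCollector<Entry> create(int numHits) {
        return new TopDocsCollector<Entry>();
      }
    }

    public class RawTypeFixDemo {
      public static void main(String[] args) {
        // before: TopDocsCollector tdc = ...   (raw type, compiler warning)
        TopDocsCollector<Entry> tdc = TopFieldCollector.create(10);
        System.out.println(tdc != null);
      }
    }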
diff --git a/src/test/org/apache/lucene/search/TestTermRangeFilter.java b/src/test/org/apache/lucene/search/TestTermRangeFilter.java
index 70ef5155307..4c2f33f302a 100644
--- a/src/test/org/apache/lucene/search/TestTermRangeFilter.java
+++ b/src/test/org/apache/lucene/search/TestTermRangeFilter.java
@@ -400,8 +400,6 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
Query q = new TermQuery(new Term("body","body"));
Collator collator = Collator.getInstance(new Locale("da", "dk"));
- Query query = new TermRangeQuery
- ("content", "H\u00D8T", "MAND", false, false, collator);
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
diff --git a/src/test/org/apache/lucene/search/TestTermRangeQuery.java b/src/test/org/apache/lucene/search/TestTermRangeQuery.java
index 36b6b7d0c4b..86f24bdb5f7 100644
--- a/src/test/org/apache/lucene/search/TestTermRangeQuery.java
+++ b/src/test/org/apache/lucene/search/TestTermRangeQuery.java
@@ -43,7 +43,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
private RAMDirectory dir;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
dir = new RAMDirectory();
}
diff --git a/src/test/org/apache/lucene/search/TestTermVectors.java b/src/test/org/apache/lucene/search/TestTermVectors.java
index 9e4766cf95e..6389aacb805 100644
--- a/src/test/org/apache/lucene/search/TestTermVectors.java
+++ b/src/test/org/apache/lucene/search/TestTermVectors.java
@@ -39,7 +39,7 @@ public class TestTermVectors extends LuceneTestCase {
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -233,7 +233,6 @@ public class TestTermVectors extends LuceneTestCase {
try {
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
- assertTrue(writer != null);
writer.addDocument(testDoc1);
writer.addDocument(testDoc2);
writer.addDocument(testDoc3);
diff --git a/src/test/org/apache/lucene/search/TestThreadSafe.java b/src/test/org/apache/lucene/search/TestThreadSafe.java
index 639f14edc27..c6623813671 100755
--- a/src/test/org/apache/lucene/search/TestThreadSafe.java
+++ b/src/test/org/apache/lucene/search/TestThreadSafe.java
@@ -16,7 +16,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import junit.framework.TestCase;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -65,7 +64,7 @@ public class TestThreadSafe extends LuceneTestCase {
}
} catch (Throwable th) {
failure=th.toString();
- TestCase.fail(failure);
+ fail(failure);
}
}
@@ -135,7 +134,7 @@ public class TestThreadSafe extends LuceneTestCase {
tarr[i].join();
}
if (failure!=null) {
- TestCase.fail(failure);
+ fail(failure);
}
}
diff --git a/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
index 016a84040bc..5acbba86e13 100644
--- a/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
+++ b/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -97,7 +97,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
searcher.close();
super.tearDown();
}
diff --git a/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java b/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
index 0965e795563..c6cbc59096a 100755
--- a/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
+++ b/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
@@ -84,7 +84,6 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
}
// must have static class otherwise serialization tests fail
- @SuppressWarnings({"SerializableHasSerializationMethods", "serial"})
private static class CustomAddQuery extends CustomScoreQuery {
// constructor
CustomAddQuery(Query q, ValueSourceQuery qValSrc) {
@@ -120,7 +119,6 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
}
// must have static class otherwise serialization tests fail
- @SuppressWarnings({"SerializableHasSerializationMethods", "serial"})
private static class CustomMulAddQuery extends CustomScoreQuery {
// constructor
CustomMulAddQuery(Query q, ValueSourceQuery qValSrc1, ValueSourceQuery qValSrc2) {
@@ -179,7 +177,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
@Override
public float customScore(int doc, float subScore, float valSrcScore) throws IOException {
assertTrue(doc <= reader.maxDoc());
- return (float) values[doc];
+ return values[doc];
}
};
}
@@ -224,7 +222,6 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
original = new CustomScoreQuery(q);
rewritten = (CustomScoreQuery) original.rewrite(s.getIndexReader());
assertTrue("rewritten query should not be identical, as TermRangeQuery rewrites", original != rewritten);
- assertTrue("rewritten query should be a CustomScoreQuery", rewritten instanceof CustomScoreQuery);
assertTrue("no hits for query", s.search(rewritten,1).totalHits > 0);
assertEquals(s.search(q,1).totalHits, s.search(original,1).totalHits);
assertEquals(s.search(q,1).totalHits, s.search(rewritten,1).totalHits);
diff --git a/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java b/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java
index 8df6b829010..1163b78557b 100755
--- a/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java
+++ b/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java
@@ -39,7 +39,6 @@ import static org.junit.Assert.*;
*
* The exact score tests use TopDocs top to verify the exact score.
*/
-@SuppressWarnings({"UseOfSystemOutOrSystemErr"})
public class TestFieldScoreQuery extends FunctionTestSetup {
/* @override constructor */
diff --git a/src/test/org/apache/lucene/search/function/TestOrdValues.java b/src/test/org/apache/lucene/search/function/TestOrdValues.java
index 1f01052ce21..8f06b92f51a 100644
--- a/src/test/org/apache/lucene/search/function/TestOrdValues.java
+++ b/src/test/org/apache/lucene/search/function/TestOrdValues.java
@@ -34,7 +34,6 @@ import org.junit.Test;
*
* The exact score tests use TopDocs top to verify the exact score.
*/
-@SuppressWarnings({"UseOfSystemOutOrSystemErr"})
public class TestOrdValues extends FunctionTestSetup {
/* @override constructor */
diff --git a/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
index 5b6e06d984d..f1bea785558 100644
--- a/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
+++ b/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
@@ -188,10 +188,10 @@ public class TestPayloadNearQuery extends LuceneTestCase {
TopDocs hits;
query = newPhraseQuery("field", "nine hundred ninety nine", true);
hits = searcher.search(query, null, 100);
+ assertTrue("hits is null and it shouldn't be", hits != null);
ScoreDoc doc = hits.scoreDocs[0];
// System.out.println("Doc: " + doc.toString());
// System.out.println("Explain: " + searcher.explain(query, doc.doc));
- assertTrue("hits is null and it shouldn't be", hits != null);
assertTrue("there should only be one hit", hits.totalHits == 1);
// should have score = 3 because adjacent terms have payloads of 2,4
assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
diff --git a/src/test/org/apache/lucene/search/spans/TestBasics.java b/src/test/org/apache/lucene/search/spans/TestBasics.java
index e754c236f41..c7c74a61337 100644
--- a/src/test/org/apache/lucene/search/spans/TestBasics.java
+++ b/src/test/org/apache/lucene/search/spans/TestBasics.java
@@ -52,7 +52,7 @@ public class TestBasics extends LuceneTestCase {
private IndexSearcher searcher;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
diff --git a/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java b/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
index 606d33ebf97..70d70ac9c10 100644
--- a/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
+++ b/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
@@ -50,7 +50,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
protected IndexSearcher searcher;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory,
@@ -116,9 +116,9 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
searcher.close();
+ super.tearDown();
}
protected void check(SpanQuery q, int[] docs) throws Exception {
diff --git a/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index 4de67550b25..c23eeda85c0 100644
--- a/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -39,13 +39,13 @@ public class TestNearSpansOrdered extends LuceneTestCase {
new QueryParser(TEST_VERSION_CURRENT, FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
searcher.close();
+ super.tearDown();
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
diff --git a/src/test/org/apache/lucene/search/spans/TestSpans.java b/src/test/org/apache/lucene/search/spans/TestSpans.java
index f3f6f644be6..61d261195ae 100644
--- a/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -46,7 +46,7 @@ public class TestSpans extends LuceneTestCase {
public static final String field = "field";
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
diff --git a/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java b/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
index 8c388051b3a..6b054ab1dcc 100644
--- a/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
+++ b/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
@@ -38,7 +38,7 @@ import org.apache.lucene.store.RAMDirectory;
public class TestSpansAdvanced extends LuceneTestCase {
// location to the index
- protected Directory mDirectory;;
+ protected Directory mDirectory;
protected IndexSearcher searcher;
@@ -52,7 +52,6 @@ public class TestSpansAdvanced extends LuceneTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
- super.setUp();
// create test index
mDirectory = new RAMDirectory();
@@ -67,10 +66,10 @@ public class TestSpansAdvanced extends LuceneTestCase {
@Override
protected void tearDown() throws Exception {
- super.tearDown();
searcher.close();
mDirectory.close();
mDirectory = null;
+ super.tearDown();
}
/**
diff --git a/src/test/org/apache/lucene/store/TestBufferedIndexInput.java b/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
index 6ce84da41b4..c4e5cd62a1b 100755
--- a/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
+++ b/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
@@ -307,7 +307,7 @@ public class TestBufferedIndexInput extends LuceneTestCase {
//int count = 0;
for (final IndexInput ip : allIndexInputs) {
BufferedIndexInput bii = (BufferedIndexInput) ip;
- int bufferSize = 1024+(int) Math.abs(rand.nextInt() % 32768);
+ int bufferSize = 1024+Math.abs(rand.nextInt() % 32768);
bii.setBufferSize(bufferSize);
//count++;
}
@@ -317,7 +317,7 @@ public class TestBufferedIndexInput extends LuceneTestCase {
@Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
// Make random changes to buffer size
- bufferSize = 1+(int) Math.abs(rand.nextInt() % 10);
+ bufferSize = 1+Math.abs(rand.nextInt() % 10);
IndexInput f = dir.openInput(name, bufferSize);
allIndexInputs.add(f);
return f;
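Math.abs is overloaded per primitive type; since rand.nextInt() % 32768 is already an int, the int overload is selected and returns int, making the (int) cast dead weight. A quick demonstration:

    import java.util.Random;

    public class AbsOverloadDemo {
      public static void main(String[] args) {
        Random rand = new Random(42);
        // rand.nextInt() % 32768 is an int, so Math.abs(int) applies and returns int
        int bufferSize = 1024 + Math.abs(rand.nextInt() % 32768);
        System.out.println(bufferSize);
      }
    }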
diff --git a/src/test/org/apache/lucene/store/TestLockFactory.java b/src/test/org/apache/lucene/store/TestLockFactory.java
index e1df32d318c..5e78ee9b191 100755
--- a/src/test/org/apache/lucene/store/TestLockFactory.java
+++ b/src/test/org/apache/lucene/store/TestLockFactory.java
@@ -30,7 +30,6 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -331,26 +330,22 @@ public class TestLockFactory extends LuceneTestCase {
e.printStackTrace(System.out);
break;
}
- if (searcher != null) {
- ScoreDoc[] hits = null;
- try {
- hits = searcher.search(query, null, 1000).scoreDocs;
- } catch (IOException e) {
- hitException = true;
- System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
- e.printStackTrace(System.out);
- break;
- }
- // System.out.println(hits.length() + " total results");
- try {
- searcher.close();
- } catch (IOException e) {
- hitException = true;
- System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
- e.printStackTrace(System.out);
- break;
- }
- searcher = null;
+ try {
+ searcher.search(query, null, 1000);
+ } catch (IOException e) {
+ hitException = true;
+ System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
+ e.printStackTrace(System.out);
+ break;
+ }
+ // System.out.println(hits.length() + " total results");
+ try {
+ searcher.close();
+ } catch (IOException e) {
+ hitException = true;
+ System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
+ e.printStackTrace(System.out);
+ break;
}
}
}
diff --git a/src/test/org/apache/lucene/store/TestRAMDirectory.java b/src/test/org/apache/lucene/store/TestRAMDirectory.java
index 0b320c71195..d009c0e2792 100644
--- a/src/test/org/apache/lucene/store/TestRAMDirectory.java
+++ b/src/test/org/apache/lucene/store/TestRAMDirectory.java
@@ -46,7 +46,7 @@ public class TestRAMDirectory extends LuceneTestCase {
// setup the index
@Override
- public void setUp () throws Exception {
+ protected void setUp () throws Exception {
super.setUp();
String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null)
@@ -152,12 +152,12 @@ public class TestRAMDirectory extends LuceneTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
// cleanup
if (indexDir != null && indexDir.exists()) {
rmDir (indexDir);
}
+ super.tearDown();
}
// LUCENE-1196
diff --git a/src/test/org/apache/lucene/store/TestWindowsMMap.java b/src/test/org/apache/lucene/store/TestWindowsMMap.java
index 367a77749fa..609464deb37 100644
--- a/src/test/org/apache/lucene/store/TestWindowsMMap.java
+++ b/src/test/org/apache/lucene/store/TestWindowsMMap.java
@@ -35,7 +35,7 @@ public class TestWindowsMMap extends LuceneTestCase {
private Random random;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
random = newRandom();
}
diff --git a/src/test/org/apache/lucene/util/LuceneTestCase.java b/src/test/org/apache/lucene/util/LuceneTestCase.java
index 8dd0dc4b445..0b3cdd6744e 100644
--- a/src/test/org/apache/lucene/util/LuceneTestCase.java
+++ b/src/test/org/apache/lucene/util/LuceneTestCase.java
@@ -51,10 +51,7 @@ import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
*
* @see #assertSaneFieldCaches
*
- * @deprecated Replaced by {@link #LuceneTestCaseJ4}
- *
*/
-@Deprecated
public abstract class LuceneTestCase extends TestCase {
public static final Version TEST_VERSION_CURRENT = LuceneTestCaseJ4.TEST_VERSION_CURRENT;
@@ -213,7 +210,7 @@ public abstract class LuceneTestCase extends TestCase {
/**
* Convenience method for logging an array. Wraps the array in an iterator and delegates
- * @see dumpIterator(String,Iterator,PrintStream)
+ * @see #dumpIterator(String,Iterator,PrintStream)
*/
public static void dumpArray(String label, Object[] objs,
PrintStream stream) {
diff --git a/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java b/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
index f79bee5b9da..ade98dc0838 100644
--- a/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
+++ b/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
@@ -56,17 +56,16 @@ import static org.junit.Assert.fail;
* @Before - replaces setup
* @After - replaces teardown
* @Test - any public method with this annotation is a test case, regardless
* of its name
- *
- *
+ *
+ *
* See Junit4 documentation for a complete list of features at
* http://junit.org/junit/javadoc/4.7/
- *
+ *
* Import from org.junit rather than junit.framework.
- *
+ *
* You should be able to use this class anywhere you used LuceneTestCase
* if you annotate your derived class correctly with the annotations above
- * @see assertSaneFieldCaches
- *
+ * @see #assertSaneFieldCaches(String)
*/
@@ -233,7 +232,7 @@ public class LuceneTestCaseJ4 {
* @param iter Each next() is toString()ed and logged on its own line. If iter is null this is logged differently than an empty iterator.
* @param stream Stream to log messages to.
*/
- public static void dumpIterator(String label, Iterator iter,
+ public static void dumpIterator(String label, Iterator<?> iter,
PrintStream stream) {
stream.println("*** BEGIN " + label + " ***");
if (null == iter) {
@@ -249,11 +248,11 @@ public class LuceneTestCaseJ4 {
/**
* Convenience method for logging an array. Wraps the array in an iterator and delegates
*
- * @see dumpIterator(String,Iterator,PrintStream)
+ * @see #dumpIterator(String,Iterator,PrintStream)
*/
public static void dumpArray(String label, Object[] objs,
PrintStream stream) {
- Iterator iter = (null == objs) ? null : Arrays.asList(objs).iterator();
+ Iterator<?> iter = (null == objs) ? null : Arrays.asList(objs).iterator();
dumpIterator(label, iter, stream);
}
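Iterator<?> is the right signature for a dump utility that only ever calls toString() on elements, and the leading # in @see is what makes javadoc resolve a method in the same class. A compact standalone version of the pattern:

    import java.io.PrintStream;
    import java.util.Arrays;
    import java.util.Iterator;

    public class DumpDemo {
      /** @see #dumpIterator(String, Iterator, PrintStream) */
      public static void dumpArray(String label, Object[] objs, PrintStream stream) {
        Iterator<?> iter = (null == objs) ? null : Arrays.asList(objs).iterator();
        dumpIterator(label, iter, stream);
      }

      public static void dumpIterator(String label, Iterator<?> iter, PrintStream stream) {
        stream.println("*** BEGIN " + label + " ***");
        if (iter != null) {
          while (iter.hasNext()) stream.println(String.valueOf(iter.next()));
        }
        stream.println("*** END " + label + " ***");
      }
    }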
diff --git a/src/test/org/apache/lucene/util/TestBitVector.java b/src/test/org/apache/lucene/util/TestBitVector.java
index 5d2a7a0782d..77c0e6f61e2 100644
--- a/src/test/org/apache/lucene/util/TestBitVector.java
+++ b/src/test/org/apache/lucene/util/TestBitVector.java
@@ -159,7 +159,6 @@ public class TestBitVector extends LuceneTestCase
/**
* Test r/w when size/count cause switching between bit-set and d-gaps file formats.
- * @throws Exception
*/
public void testDgaps() throws IOException {
doTestDgaps(1,0,1);
diff --git a/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java b/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
index f282c146432..e540b856fd0 100644
--- a/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
+++ b/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
@@ -78,29 +78,23 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase {
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
readerA.close();
readerB.close();
readerX.close();
+ super.tearDown();
}
public void testSanity() throws IOException {
FieldCache cache = FieldCache.DEFAULT;
cache.purgeAllCaches();
- double [] doubles;
- int [] ints;
+ cache.getDoubles(readerA, "theDouble");
+ cache.getDoubles(readerA, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
+ cache.getDoubles(readerB, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
- doubles = cache.getDoubles(readerA, "theDouble");
- doubles = cache.getDoubles(readerA, "theDouble",
- FieldCache.DEFAULT_DOUBLE_PARSER);
- doubles = cache.getDoubles(readerB, "theDouble",
- FieldCache.DEFAULT_DOUBLE_PARSER);
-
- ints = cache.getInts(readerX, "theInt");
- ints = cache.getInts(readerX, "theInt",
- FieldCache.DEFAULT_INT_PARSER);
+ cache.getInts(readerX, "theInt");
+ cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
// // //
@@ -118,15 +112,9 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase {
FieldCache cache = FieldCache.DEFAULT;
cache.purgeAllCaches();
- int [] ints;
- String [] strings;
- byte [] bytes;
-
- ints = cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
- strings = cache.getStrings(readerX, "theInt");
-
- // this one is ok
- bytes = cache.getBytes(readerX, "theByte");
+ cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
+ cache.getStrings(readerX, "theInt");
+ cache.getBytes(readerX, "theByte");
// // //
@@ -148,15 +136,11 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase {
FieldCache cache = FieldCache.DEFAULT;
cache.purgeAllCaches();
- String [] strings;
- byte [] bytes;
+ cache.getStrings(readerA, "theString");
+ cache.getStrings(readerB, "theString");
+ cache.getStrings(readerX, "theString");
- strings = cache.getStrings(readerA, "theString");
- strings = cache.getStrings(readerB, "theString");
- strings = cache.getStrings(readerX, "theString");
-
- // this one is ok
- bytes = cache.getBytes(readerX, "theByte");
+ cache.getBytes(readerX, "theByte");
// // //
diff --git a/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java b/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java
index 37c19a7edd6..6c1eac402dd 100644
--- a/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java
+++ b/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java
@@ -338,7 +338,7 @@ public class TestIndexableBinaryStringTools extends LuceneTestCase {
public String binaryDump(byte[] binary, int numBytes) {
StringBuilder buf = new StringBuilder();
for (int byteNum = 0 ; byteNum < numBytes ; ++byteNum) {
- String hex = Integer.toHexString((int)binary[byteNum] & 0xFF);
+ String hex = Integer.toHexString(binary[byteNum] & 0xFF);
if (hex.length() == 1) {
buf.append('0');
}
@@ -359,7 +359,7 @@ public class TestIndexableBinaryStringTools extends LuceneTestCase {
public String charArrayDump(char[] charArray, int numBytes) {
StringBuilder buf = new StringBuilder();
for (int charNum = 0 ; charNum < numBytes ; ++charNum) {
- String hex = Integer.toHexString((int)charArray[charNum]);
+ String hex = Integer.toHexString(charArray[charNum]);
for (int digit = 0 ; digit < 4 - hex.length() ; ++digit) {
buf.append('0');
}
diff --git a/src/test/org/apache/lucene/util/TestRamUsageEstimator.java b/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
index ad3f5a8121e..aee49d7fed1 100644
--- a/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
+++ b/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
@@ -22,23 +22,17 @@ import junit.framework.TestCase;
public class TestRamUsageEstimator extends TestCase {
public void testBasic() {
- String string = new String("test str");
RamUsageEstimator rue = new RamUsageEstimator();
- long size = rue.estimateRamUsage(string);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage("test str");
- string = new String("test strin");
- size = rue.estimateRamUsage(string);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage("test strin");
Holder holder = new Holder();
holder.holder = new Holder("string2", 5000L);
- size = rue.estimateRamUsage(holder);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage(holder);
String[] strings = new String[]{new String("test strin"), new String("hollow"), new String("catchmaster")};
- size = rue.estimateRamUsage(strings);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage(strings);
}
private static final class Holder {
diff --git a/src/test/org/apache/lucene/util/TestStringIntern.java b/src/test/org/apache/lucene/util/TestStringIntern.java
index c4d8c8f3325..8f6002c1f76 100755
--- a/src/test/org/apache/lucene/util/TestStringIntern.java
+++ b/src/test/org/apache/lucene/util/TestStringIntern.java
@@ -16,9 +16,6 @@
*/
package org.apache.lucene.util;
-
-import junit.framework.TestCase;
-
import java.util.Random;
public class TestStringIntern extends LuceneTestCase {
@@ -48,7 +45,6 @@ public class TestStringIntern extends LuceneTestCase {
int nThreads = 20;
// final int iter=100000;
final int iter=1000000;
- final boolean newStrings=true;
// try native intern
// StringHelper.interner = new StringInterner();
@@ -64,20 +60,20 @@ public class TestStringIntern extends LuceneTestCase {
for (int j=0; j<iter; j++) {
diff --git a/src/test/org/apache/lucene/util/TestVirtualMethod.java b/src/test/org/apache/lucene/util/TestVirtualMethod.java
--- a/src/test/org/apache/lucene/util/TestVirtualMethod.java
+++ b/src/test/org/apache/lucene/util/TestVirtualMethod.java
assertTrue(VirtualMethod.compareImplementationDistance(TestClass3.class, publicTestMethod, protectedTestMethod) > 0);
assertEquals(0, VirtualMethod.compareImplementationDistance(TestClass5.class, publicTestMethod, protectedTestMethod));
-
+ }
+
+ @SuppressWarnings("unchecked")
+ public void testExceptions() {
try {
// cast to Class to remove generics:
- @SuppressWarnings("unchecked") int dist = publicTestMethod.getImplementationDistance((Class) LuceneTestCase.class);
+ publicTestMethod.getImplementationDistance((Class) LuceneTestCase.class);
fail("LuceneTestCase is not a subclass and can never override publicTest(String)");
} catch (IllegalArgumentException arg) {
// pass
diff --git a/src/test/org/apache/lucene/util/cache/TestDoubleBarrelLRUCache.java b/src/test/org/apache/lucene/util/cache/TestDoubleBarrelLRUCache.java
index b67cb12b524..6ba3f2708b5 100644
--- a/src/test/org/apache/lucene/util/cache/TestDoubleBarrelLRUCache.java
+++ b/src/test/org/apache/lucene/util/cache/TestDoubleBarrelLRUCache.java
@@ -89,7 +89,7 @@ public class TestDoubleBarrelLRUCache extends BaseTestLRU {
}
final CacheThread[] threads = new CacheThread[NUM_THREADS];
- final long endTime = System.currentTimeMillis()+((long) 1000);
+ final long endTime = System.currentTimeMillis()+1000L;
for(int i=0;i<NUM_THREADS;i++) {
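The final cleanup replaces a cast with a literal suffix: (long) 1000 and 1000L denote the same value, but the suffix states the type directly. For example:

    public class LongLiteralDemo {
      public static void main(String[] args) {
        long endTime = System.currentTimeMillis() + 1000L; // suffix, no cast needed
        System.out.println(endTime);
      }
    }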