From a58c26978f5c7a0dbbbe535c627ef5388fad550b Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Tue, 30 Nov 2010 11:22:39 +0000
Subject: [PATCH] LUCENE-2781: drop deprecations from trunk
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1040463 13f79535-47bb-0310-9956-ffa450edef68
---
.../byTask/feeds/EnwikiDocMaker.java | 38 -
.../benchmark/byTask/feeds/LineDocMaker.java | 50 -
.../apache/lucene/store/je/JEDirectory.java | 11 +-
.../apache/lucene/store/db/DbDirectory.java | 11 +-
.../lucene/demo/html/SimpleCharStream.java | 4 +-
.../lucene/search/highlight/TextFragment.java | 12 +-
.../vectorhighlight/BaseFragmentsBuilder.java | 27 +-
.../search/highlight/HighlighterTest.java | 36 +-
.../instantiated/InstantiatedIndexWriter.java | 21 +-
.../lucene/index/FieldNormModifier.java | 27 +-
.../lucene/index/MultiPassIndexSplitter.java | 2 +-
.../lucene/misc/LengthNormModifier.java | 144 -
.../apache/lucene/search/BooleanFilter.java | 14 +-
.../apache/lucene/search/ChainedFilter.java | 16 +-
.../lucene/search/regex/SpanRegexQuery.java | 46 -
.../lucene/search/regex/TestRegexQuery.java | 7 +-
.../search/regex/TestSpanRegexQuery.java | 61 -
.../QueryNodeProcessorPipeline.java | 23 +-
.../MultiFieldQueryParserWrapper.java | 269 -
.../standard/QueryParserWrapper.java | 491 -
.../lucene/queryParser/standard/package.html | 7 -
.../standard/parser/JavaCharStream.java | 4 +-
.../ParametricRangeQueryNodeProcessor.java | 14 +-
.../surround/parser/CharStream.java | 4 +-
.../queryParser/core/nodes/TestQueryNode.java | 8 +-
.../precedence/TestPrecedenceQueryParser.java | 21 +-
.../standard/TestMultiAnalyzerWrapper.java | 245 -
.../TestMultiFieldQueryParserWrapper.java | 370 -
.../queryParser/standard/TestQPHelper.java | 92 +-
.../standard/TestQueryParserWrapper.java | 1212 -
.../lucene/search/TestRemoteSearchable.java | 2 +-
.../apache/lucene/search/TestRemoteSort.java | 14 +-
.../geometry/shape/DistanceApproximation.java | 126 -
.../tier/projections/SinusoidalProjector.java | 2 +-
.../apache/lucene/xmlparser/TestParser.java | 2 +-
.../TestNumericRangeFilterBuilder.java | 1 -
.../apache/lucene/analysis/CharTokenizer.java | 196 +-
.../org/apache/lucene/document/DateField.java | 123 -
.../org/apache/lucene/document/DateTools.java | 4 -
.../org/apache/lucene/document/Field.java | 46 +-
.../apache/lucene/document/NumberTools.java | 140 -
.../lucene/index/CompoundFileReader.java | 5 +
.../org/apache/lucene/index/IndexWriter.java | 824 +-
.../apache/lucene/index/SegmentReader.java | 27 -
.../java/org/apache/lucene/index/Term.java | 2 +-
.../index/codecs/preflex/PreFlexCodec.java | 2 +-
.../index/codecs/preflex/PreFlexFields.java | 5 +-
.../index/codecs/preflex/SegmentTermDocs.java | 2 +-
.../index/codecs/preflex/SegmentTermEnum.java | 2 +-
.../codecs/preflex/SegmentTermPositions.java | 6 +-
.../index/codecs/preflex/TermBuffer.java | 5 +
.../lucene/index/codecs/preflex/TermInfo.java | 2 +-
.../index/codecs/preflex/TermInfosReader.java | 2 +-
.../apache/lucene/queryParser/CharStream.java | 4 +-
.../lucene/queryParser/QueryParser.java | 14 +-
.../apache/lucene/queryParser/QueryParser.jj | 14 +-
.../lucene/queryParser/QueryParserBase.java | 91 +-
.../queryParser/QueryParserTokenManager.java | 1 -
.../apache/lucene/search/BooleanScorer2.java | 2 +-
.../org/apache/lucene/search/Similarity.java | 43 +-
.../org/apache/lucene/search/SortField.java | 14 +-
.../apache/lucene/search/TermRangeQuery.java | 10 +-
.../search/function/MultiValueSource.java | 4 +-
.../org/apache/lucene/store/DataOutput.java | 53 -
.../org/apache/lucene/store/Directory.java | 53 +-
.../org/apache/lucene/store/FSDirectory.java | 12 -
.../lucene/store/FileSwitchDirectory.java | 6 -
.../apache/lucene/store/NoLockFactory.java | 9 +-
.../org/apache/lucene/store/RAMDirectory.java | 4 +
.../util/IndexableBinaryStringTools.java | 157 -
.../org/apache/lucene/util/NumericUtils.java | 128 +-
.../org/apache/lucene/util/StringHelper.java | 25 +-
.../java/org/apache/lucene/util/Version.java | 31 +-
.../org/apache/lucene/TestExternalCodecs.java | 10 +-
.../test/org/apache/lucene/TestSearch.java | 16 +-
.../lucene/analysis/TestCharTokenizers.java | 94 -
.../org/apache/lucene/analysis/TestToken.java | 27 +-
.../TestCharTermAttributeImpl.java | 7 +-
.../TestSimpleAttributeImpls.java | 153 -
.../lucene/document/TestBinaryDocument.java | 8 -
.../lucene/document/TestNumberTools.java | 82 -
.../org/apache/lucene/index/Test2BTerms.java | 15 +-
.../apache/lucene/index/TestAddIndexes.java | 177 +-
.../index/TestBackwardsCompatibility.java | 14 +-
.../index/TestConcurrentMergeScheduler.java | 16 +-
.../lucene/index/TestDeletionPolicy.java | 11 +-
.../test/org/apache/lucene/index/TestDoc.java | 24 +-
.../lucene/index/TestDocumentWriter.java | 2 +-
.../apache/lucene/index/TestFieldsReader.java | 9 +-
.../org/apache/lucene/index/TestFlex.java | 8 +-
.../lucene/index/TestIndexFileDeleter.java | 11 +-
.../apache/lucene/index/TestIndexReader.java | 93 +-
.../lucene/index/TestIndexReaderClone.java | 21 +-
.../index/TestIndexReaderCloneNorms.java | 33 +-
.../lucene/index/TestIndexReaderReopen.java | 38 +-
.../apache/lucene/index/TestIndexWriter.java | 299 +-
.../lucene/index/TestIndexWriterConfig.java | 48 -
.../lucene/index/TestIndexWriterDelete.java | 2 +-
.../index/TestIndexWriterExceptions.java | 27 +-
.../index/TestIndexWriterMergePolicy.java | 9 +-
.../lucene/index/TestIndexWriterMerging.java | 21 +-
.../index/TestIndexWriterOnDiskFull.java | 10 +-
.../lucene/index/TestIndexWriterReader.java | 65 +-
.../index/TestIndexWriterWithThreads.java | 44 +-
.../org/apache/lucene/index/TestLazyBug.java | 11 +-
.../lucene/index/TestLazyProxSkipping.java | 13 +-
.../lucene/index/TestMultiLevelSkipList.java | 2 +-
.../index/TestNRTReaderWithThreads.java | 10 +-
.../org/apache/lucene/index/TestNorms.java | 23 +-
.../org/apache/lucene/index/TestOmitTf.java | 35 +-
.../lucene/index/TestParallelReader.java | 28 +-
.../index/TestParallelReaderEmptyIndex.java | 4 +-
.../index/TestPayloadProcessorProvider.java | 17 +-
.../org/apache/lucene/index/TestPayloads.java | 8 +-
.../lucene/index/TestSegmentMerger.java | 2 +-
.../lucene/index/TestSegmentTermEnum.java | 2 +-
.../lucene/index/TestTermVectorsReader.java | 10 +-
.../apache/lucene/index/TestTermdocPerf.java | 11 +-
.../lucene/index/TestThreadedOptimize.java | 12 +-
.../apache/lucene/index/TestTransactions.java | 20 +-
.../lucene/queryParser/TestMultiAnalyzer.java | 4 +-
.../lucene/queryParser/TestQueryParser.java | 92 +-
.../org/apache/lucene/search/CheckHits.java | 7 -
.../org/apache/lucene/search/QueryUtils.java | 16 +-
.../apache/lucene/search/TestBoolean2.java | 2 +-
.../lucene/search/TestCachingSpanFilter.java | 12 +-
.../search/TestCachingWrapperFilter.java | 12 +-
.../lucene/search/TestCustomSearcherSort.java | 3 +-
.../search/TestElevationComparator.java | 8 +-
.../lucene/search/TestExplanations.java | 4 +-
.../apache/lucene/search/TestPhraseQuery.java | 2 +-
.../apache/lucene/search/TestRegexpQuery.java | 6 +-
.../org/apache/lucene/search/TestSort.java | 21 +-
.../lucene/search/TestTermRangeQuery.java | 24 +-
.../search/function/TestCustomScoreQuery.java | 2 +-
.../search/function/TestValueSource.java | 2 +-
.../lucene/search/spans/TestBasics.java | 14 +-
.../spans/TestFieldMaskingSpanQuery.java | 13 +-
.../apache/lucene/search/spans/TestSpans.java | 4 +-
.../lucene/store/MockDirectoryWrapper.java | 11 -
.../lucene/store/TestBufferedIndexInput.java | 18 +-
.../lucene/store/TestFileSwitchDirectory.java | 8 +-
.../apache/lucene/util/LuceneTestCase.java | 135 +-
.../util/TestFieldCacheSanityChecker.java | 2 +-
.../util/TestIndexableBinaryStringTools.java | 152 +-
.../apache/lucene/util/TestStringHelper.java | 40 -
.../org/apache/lucene/util/TestVersion.java | 6 +-
.../org/apache/lucene/util/_TestUtil.java | 2 +-
.../lucene/analysis/ar/ArabicAnalyzer.java | 37 +-
.../analysis/ar/ArabicLetterTokenizer.java | 37 -
.../lucene/analysis/bg/BulgarianAnalyzer.java | 16 +-
.../lucene/analysis/br/BrazilianAnalyzer.java | 92 +-
.../analysis/br/BrazilianStemFilter.java | 17 +-
.../lucene/analysis/cjk/CJKAnalyzer.java | 61 +-
.../lucene/analysis/cn/ChineseAnalyzer.java | 4 +-
.../lucene/analysis/cn/ChineseFilter.java | 4 +-
.../lucene/analysis/cn/ChineseTokenizer.java | 4 +-
.../commongrams/CommonGramsFilter.java | 72 -
.../compound/CompoundWordTokenFilterBase.java | 55 +-
.../DictionaryCompoundWordTokenFilter.java | 63 +-
.../HyphenationCompoundWordTokenFilter.java | 113 +-
.../lucene/analysis/core/LetterTokenizer.java | 34 -
.../lucene/analysis/core/LowerCaseFilter.java | 8 -
.../analysis/core/LowerCaseTokenizer.java | 34 -
.../lucene/analysis/core/SimpleAnalyzer.java | 8 -
.../lucene/analysis/core/StopFilter.java | 134 +-
.../analysis/core/WhitespaceAnalyzer.java | 10 -
.../analysis/core/WhitespaceTokenizer.java | 34 -
.../lucene/analysis/cz/CzechAnalyzer.java | 125 +-
.../lucene/analysis/de/GermanAnalyzer.java | 74 +-
.../lucene/analysis/de/GermanStemFilter.java | 32 +-
.../lucene/analysis/el/GreekAnalyzer.java | 30 +-
.../analysis/el/GreekLowerCaseFilter.java | 6 -
.../lucene/analysis/en/EnglishAnalyzer.java | 6 +-
.../lucene/analysis/fa/PersianAnalyzer.java | 31 -
.../lucene/analysis/fr/ElisionFilter.java | 57 +-
.../lucene/analysis/fr/FrenchAnalyzer.java | 63 +-
.../lucene/analysis/fr/FrenchStemFilter.java | 27 +-
.../miscellaneous/ASCIIFoldingFilter.java | 5 +-
.../miscellaneous/ISOLatin1AccentFilter.java | 262 -
.../miscellaneous/KeepWordFilter.java | 11 +-
.../miscellaneous/PatternAnalyzer.java | 5 +-
.../miscellaneous/WordDelimiterFilter.java | 112 -
.../lucene/analysis/nl/DutchAnalyzer.java | 86 +-
.../lucene/analysis/nl/DutchStemFilter.java | 38 +-
.../lucene/analysis/nl/DutchStemmer.java | 4 +-
.../analysis/payloads/IdentityEncoder.java | 8 -
.../analysis/reverse/ReverseStringFilter.java | 89 +-
.../lucene/analysis/ru/RussianAnalyzer.java | 29 +-
.../analysis/ru/RussianLetterTokenizer.java | 41 +-
.../analysis/ru/RussianLowerCaseFilter.java | 57 -
.../lucene/analysis/ru/RussianStemFilter.java | 93 -
.../lucene/analysis/ru/RussianStemmer.java | 600 -
.../analysis/snowball/SnowballAnalyzer.java | 58 +-
.../analysis/standard/ClassicAnalyzer.java | 8 -
.../analysis/standard/ClassicTokenizer.java | 63 +-
.../standard/ClassicTokenizerImpl.java | 5 -
.../standard/ClassicTokenizerImpl.jflex | 5 -
.../analysis/standard/StandardAnalyzer.java | 8 -
.../analysis/standard/StandardFilter.java | 6 -
.../analysis/standard/StandardTokenizer.java | 62 +-
.../lucene/analysis/synonym/SynonymMap.java | 4 +-
.../lucene/analysis/th/ThaiWordFilter.java | 16 +-
.../lucene/analysis/util/CharArraySet.java | 113 +-
.../tartarus/snowball/SnowballProgram.java | 61 -
.../apache/lucene/analysis/cjk/stopwords.txt | 35 +
.../analysis/br/TestBrazilianStemmer.java | 36 +-
.../commongrams/CommonGramsFilterTest.java | 28 +-
.../lucene/analysis/core/TestAnalyzers.java | 52 +-
.../analysis/core/TestClassicAnalyzer.java | 11 +-
.../analysis/core/TestStopAnalyzer.java | 4 +-
.../lucene/analysis/core/TestStopFilter.java | 24 +-
.../lucene/analysis/cz/TestCzechAnalyzer.java | 48 +-
.../analysis/de/TestGermanAnalyzer.java | 33 +-
.../lucene/analysis/el/GreekAnalyzerTest.java | 15 +-
.../analysis/fa/TestPersianAnalyzer.java | 5 +-
.../analysis/fr/TestFrenchAnalyzer.java | 15 +-
.../TestISOLatin1AccentFilter.java | 113 -
.../miscellaneous/TestKeepWordFilter.java | 5 +-
.../miscellaneous/TestTrimFilter.java | 9 +-
.../TestWordDelimiterFilter.java | 31 +-
.../lucene/analysis/nl/TestDutchStemmer.java | 17 +-
.../pattern/TestPatternTokenizer.java | 4 +-
.../reverse/TestReverseStringFilter.java | 5 +-
.../analysis/ru/TestRussianAnalyzer.java | 58 +-
.../ru/TestRussianLetterTokenizer.java | 2 +-
.../lucene/analysis/ru/TestRussianStem.java | 51 -
.../org/apache/lucene/analysis/ru/resUTF8.htm | 1 -
.../apache/lucene/analysis/ru/stemsUTF8.txt | 49673 ----------------
.../apache/lucene/analysis/ru/testUTF8.txt | 2 -
.../apache/lucene/analysis/ru/wordsUTF8.txt | 49673 ----------------
.../sinks/TestTeeSinkTokenFilter.java | 23 +-
.../analysis/snowball/TestSnowball.java | 2 +-
.../analysis/synonym/TestSynonymFilter.java | 23 +-
.../lucene/analysis/th/TestThaiAnalyzer.java | 6 +-
.../analysis/util/TestCharArraySet.java | 46 +-
.../solr/analysis/DutchStemFilterFactory.java | 36 -
.../analysis/FrenchStemFilterFactory.java | 36 -
.../analysis/GreekLowerCaseFilterFactory.java | 2 +-
.../ISOLatin1AccentFilterFactory.java | 32 -
.../RussianLowerCaseFilterFactory.java | 49 -
.../analysis/RussianStemFilterFactory.java | 37 -
.../java/org/apache/solr/core/SolrConfig.java | 2 +-
.../apache/solr/core/SolrDeletionPolicy.java | 4 +-
.../component/SpellCheckComponent.java | 2 +-
.../org/apache/solr/schema/BinaryField.java | 3 +-
.../org/apache/solr/schema/TrieDateField.java | 2 +-
.../org/apache/solr/schema/TrieField.java | 2 +-
.../apache/solr/search/SolrIndexSearcher.java | 2 +-
.../apache/solr/search/SolrQueryParser.java | 4 +-
.../spelling/AbstractLuceneSpellChecker.java | 2 +-
.../solr/spelling/FileBasedSpellChecker.java | 38 +-
.../apache/solr/update/SolrIndexConfig.java | 81 +-
.../apache/solr/update/SolrIndexWriter.java | 136 +-
.../apache/solr/BasicFunctionalityTest.java | 3 +-
.../analysis/TestDutchStemFilterFactory.java | 41 -
.../analysis/TestFrenchStemFilterFactory.java | 41 -
.../solr/analysis/TestLuceneMatchVersion.java | 21 +-
.../solr/analysis/TestRussianFilters.java | 33 -
.../solr/analysis/TestStandardFactories.java | 13 -
.../solr/core/TestArbitraryIndexDir.java | 11 +-
.../test/org/apache/solr/core/TestConfig.java | 2 +-
.../TestLegacyMergeSchedulerPolicyConfig.java | 4 +-
.../org/apache/solr/core/TestPropInject.java | 4 +-
.../solr/core/TestPropInjectDefaults.java | 4 +-
.../solr/highlight/HighlighterTest.java | 4 +-
.../test/org/apache/solr/search/TestSort.java | 8 +-
.../spelling/IndexBasedSpellCheckerTest.java | 7 +-
.../solr/spelling/SimpleQueryConverter.java | 3 +-
.../spelling/SpellingQueryConverterTest.java | 12 +-
.../solr/conf/schema-luceneMatchVersion.xml | 14 +-
271 files changed, 1533 insertions(+), 110075 deletions(-)
delete mode 100644 lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiDocMaker.java
delete mode 100644 lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LineDocMaker.java
delete mode 100644 lucene/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java
delete mode 100644 lucene/contrib/queries/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java
delete mode 100644 lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java
delete mode 100644 lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java
delete mode 100644 lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java
delete mode 100644 lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
delete mode 100644 lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
delete mode 100644 lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geometry/shape/DistanceApproximation.java
delete mode 100644 lucene/src/java/org/apache/lucene/document/DateField.java
delete mode 100644 lucene/src/java/org/apache/lucene/document/NumberTools.java
delete mode 100644 lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java
delete mode 100644 lucene/src/test/org/apache/lucene/document/TestNumberTools.java
delete mode 100644 lucene/src/test/org/apache/lucene/util/TestStringHelper.java
delete mode 100644 modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ISOLatin1AccentFilter.java
delete mode 100644 modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLowerCaseFilter.java
delete mode 100644 modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianStemFilter.java
delete mode 100644 modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java
create mode 100644 modules/analysis/common/src/resources/org/apache/lucene/analysis/cjk/stopwords.txt
delete mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestISOLatin1AccentFilter.java
delete mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java
delete mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/ru/resUTF8.htm
delete mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/ru/stemsUTF8.txt
delete mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/ru/testUTF8.txt
delete mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/ru/wordsUTF8.txt
delete mode 100644 solr/src/java/org/apache/solr/analysis/DutchStemFilterFactory.java
delete mode 100644 solr/src/java/org/apache/solr/analysis/FrenchStemFilterFactory.java
delete mode 100644 solr/src/java/org/apache/solr/analysis/ISOLatin1AccentFilterFactory.java
delete mode 100644 solr/src/java/org/apache/solr/analysis/RussianLowerCaseFilterFactory.java
delete mode 100644 solr/src/java/org/apache/solr/analysis/RussianStemFilterFactory.java
delete mode 100644 solr/src/test/org/apache/solr/analysis/TestDutchStemFilterFactory.java
delete mode 100644 solr/src/test/org/apache/solr/analysis/TestFrenchStemFilterFactory.java
diff --git a/lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiDocMaker.java b/lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiDocMaker.java
deleted file mode 100644
index f202b0c16ce..00000000000
--- a/lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiDocMaker.java
+++ /dev/null
@@ -1,38 +0,0 @@
-package org.apache.lucene.benchmark.byTask.feeds;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.benchmark.byTask.utils.Config;
-
-/**
- * A {@link DocMaker} which reads the English Wikipedia dump. Uses
- * {@link EnwikiContentSource} as its content source, regardless if a different
- * content source was defined in the configuration.
- * @deprecated Please use {@link DocMaker} instead, with content.source=EnwikiContentSource
- */
-@Deprecated
-public class EnwikiDocMaker extends DocMaker {
- @Override
- public void setConfig(Config config) {
- super.setConfig(config);
- // Override whatever content source was set in the config
- source = new EnwikiContentSource();
- source.setConfig(config);
- System.out.println("NOTE: EnwikiDocMaker is deprecated; please use DocMaker instead (which is the default if you don't specify doc.maker) with content.source=EnwikiContentSource");
- }
-}
diff --git a/lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LineDocMaker.java b/lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LineDocMaker.java
deleted file mode 100644
index 5f54c0f6646..00000000000
--- a/lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LineDocMaker.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package org.apache.lucene.benchmark.byTask.feeds;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.benchmark.byTask.utils.Config;
-
-/**
- * A DocMaker reading one line at a time as a Document from a single file. This
- * saves IO cost (over DirContentSource) of recursing through a directory and
- * opening a new file for every document. It also re-uses its Document and Field
- * instance to improve indexing speed.
- * The expected format of each line is (arguments are separated by <TAB>):
- * title, date, body. If a line is read in a different format, a
- * {@link RuntimeException} will be thrown. In general, you should use this doc
- * maker with files that were created with
- * {@link org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask}.
- *
- * Config properties:
- *
- * - doc.random.id.limit=N (default -1) -- create random docid in the range
- * 0..N; this is useful with UpdateDoc to test updating random documents; if
- * this is unspecified or -1, then docid is sequentially assigned
- *
- * @deprecated Please use {@link DocMaker} instead, with content.source=LineDocSource
- */
-@Deprecated
-public class LineDocMaker extends DocMaker {
- @Override
- public void setConfig(Config config) {
- super.setConfig(config);
- source = new LineDocSource();
- source.setConfig(config);
- System.out.println("NOTE: LineDocMaker is deprecated; please use DocMaker instead (which is the default if you don't specify doc.maker) with content.source=LineDocSource");
- }
-}
diff --git a/lucene/contrib/db/bdb-je/src/java/org/apache/lucene/store/je/JEDirectory.java b/lucene/contrib/db/bdb-je/src/java/org/apache/lucene/store/je/JEDirectory.java
index 83c6b98386e..8e2e760d697 100644
--- a/lucene/contrib/db/bdb-je/src/java/org/apache/lucene/store/je/JEDirectory.java
+++ b/lucene/contrib/db/bdb-je/src/java/org/apache/lucene/store/je/JEDirectory.java
@@ -21,12 +21,7 @@ import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
@@ -195,6 +190,10 @@ public class JEDirectory extends Directory {
return new JEIndexInput(this, name);
}
+ @Override
+ public void sync(Collection names) throws IOException {
+ }
+
@Override
public Lock makeLock(String name) {
return new JELock();
diff --git a/lucene/contrib/db/bdb/src/java/org/apache/lucene/store/db/DbDirectory.java b/lucene/contrib/db/bdb/src/java/org/apache/lucene/store/db/DbDirectory.java
index 74478b1ed09..829dbedda3d 100644
--- a/lucene/contrib/db/bdb/src/java/org/apache/lucene/store/db/DbDirectory.java
+++ b/lucene/contrib/db/bdb/src/java/org/apache/lucene/store/db/DbDirectory.java
@@ -21,12 +21,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Collections;
+import java.util.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
@@ -211,6 +206,10 @@ public class DbDirectory extends Directory {
}
@Override
+ public void sync(Collection names) throws IOException {
+ }
+
+ @Override
public IndexInput openInput(String name)
throws IOException
{
diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/html/SimpleCharStream.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/html/SimpleCharStream.java
index fedf92d9456..a1727ff689f 100644
--- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/html/SimpleCharStream.java
+++ b/lucene/contrib/demo/src/java/org/apache/lucene/demo/html/SimpleCharStream.java
@@ -204,7 +204,7 @@ public class SimpleCharStream
}
/**
- * @deprecated
+ * @deprecated (gen)
* @see #getEndColumn
*/
@@ -213,7 +213,7 @@ public class SimpleCharStream
}
/**
- * @deprecated
+ * @deprecated (gen)
* @see #getEndLine
*/
diff --git a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TextFragment.java b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TextFragment.java
index 4829cd24a71..11d8e533ea2 100644
--- a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TextFragment.java
+++ b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TextFragment.java
@@ -37,17 +37,7 @@ public class TextFragment
this.textStartPos = textStartPos;
this.fragNum = fragNum;
}
- /**
- * @deprecated Use {@link #TextFragment(CharSequence, int, int)} instead.
- * This constructor will be removed in Lucene 4.0
- */
- @Deprecated
- public TextFragment(StringBuffer markedUpText,int textStartPos, int fragNum)
- {
- this.markedUpText=markedUpText;
- this.textStartPos = textStartPos;
- this.fragNum = fragNum;
- }
+
void setScore(float score)
{
this.score=score;
diff --git a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java
index 2d90077d22f..064f1da3d9f 100644
--- a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java
+++ b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java
@@ -107,25 +107,12 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder {
return fragments.toArray( new String[fragments.size()] );
}
- @Deprecated
- protected String[] getFieldValues( IndexReader reader, int docId, String fieldName) throws IOException {
- Document doc = reader.document( docId, new MapFieldSelector( new String[]{ fieldName } ) );
- return doc.getValues( fieldName ); // according to Document class javadoc, this never returns null
- }
-
protected Field[] getFields( IndexReader reader, int docId, String fieldName) throws IOException {
// according to javadoc, doc.getFields(fieldName) cannot be used with lazy loaded field???
- Document doc = reader.document( docId, new MapFieldSelector( new String[]{ fieldName } ) );
+ Document doc = reader.document( docId, new MapFieldSelector(fieldName) );
return doc.getFields( fieldName ); // according to Document class javadoc, this never returns null
}
- @Deprecated
- protected String makeFragment( StringBuilder buffer, int[] index, String[] values, WeightedFragInfo fragInfo ){
- final int s = fragInfo.startOffset;
- return makeFragment( fragInfo, getFragmentSource( buffer, index, values, s, fragInfo.endOffset ), s,
- preTags, postTags, NULL_ENCODER );
- }
-
protected String makeFragment( StringBuilder buffer, int[] index, Field[] values, WeightedFragInfo fragInfo,
String[] preTags, String[] postTags, Encoder encoder ){
final int s = fragInfo.startOffset;
@@ -151,18 +138,6 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder {
return fragment.toString();
}
- @Deprecated
- protected String getFragmentSource( StringBuilder buffer, int[] index, String[] values,
- int startOffset, int endOffset ){
- while( buffer.length() < endOffset && index[0] < values.length ){
- buffer.append( values[index[0]] );
- buffer.append( multiValuedSeparator );
- index[0]++;
- }
- int eo = buffer.length() < endOffset ? buffer.length() : endOffset;
- return buffer.substring( startOffset, eo );
- }
-
protected String getFragmentSource( StringBuilder buffer, int[] index, Field[] values,
int startOffset, int endOffset ){
while( buffer.length() < endOffset && index[0] < values.length ){
diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
index 51b2d274ce0..b42fa173857 100644
--- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
+++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
@@ -70,12 +70,7 @@ import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.highlight.SynonymTokenizer.TestHighlightRunner;
import org.apache.lucene.search.regex.RegexQuery;
-import org.apache.lucene.search.regex.SpanRegexQuery;
-import org.apache.lucene.search.spans.SpanNearQuery;
-import org.apache.lucene.search.spans.SpanNotQuery;
-import org.apache.lucene.search.spans.SpanOrQuery;
-import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.automaton.BasicAutomata;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
@@ -300,8 +295,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
public void testSpanRegexQuery() throws Exception {
- query = new SpanOrQuery(new SpanQuery [] {
- new SpanRegexQuery(new Term(FIELD_NAME, "ken.*")) });
+ query = new SpanOrQuery(new SpanMultiTermQueryWrapper(new RegexQuery(new Term(FIELD_NAME, "ken.*"))));
searcher = new IndexSearcher(ramDir, true);
hits = searcher.search(query, 100);
int maxNumFragmentsRequired = 2;
@@ -698,8 +692,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
int maxNumFragmentsRequired = 2;
String fragmentSeparator = "...";
- QueryScorer scorer = null;
- TokenStream tokenStream = null;
+ QueryScorer scorer;
+ TokenStream tokenStream;
tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
@@ -726,8 +720,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
int maxNumFragmentsRequired = 2;
String fragmentSeparator = "...";
- QueryScorer scorer = null;
- TokenStream tokenStream = null;
+ QueryScorer scorer;
+ TokenStream tokenStream;
tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
@@ -754,8 +748,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
int maxNumFragmentsRequired = 2;
String fragmentSeparator = "...";
- QueryScorer scorer = null;
- TokenStream tokenStream = null;
+ QueryScorer scorer;
+ TokenStream tokenStream;
tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
@@ -820,8 +814,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
Highlighter hg = new Highlighter(new SimpleHTMLFormatter(), new QueryTermScorer(query));
hg.setTextFragmenter(new NullFragmenter());
- String match = null;
- match = hg.getBestFragment(analyzer, "data", "help me [54-65]");
+ String match = hg.getBestFragment(analyzer, "data", "help me [54-65]");
assertEquals("help me [54-65]", match);
}
@@ -1133,7 +1126,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
TermQuery query = new TermQuery(new Term("data", goodWord));
- String match = null;
+ String match;
StringBuilder sb = new StringBuilder();
sb.append(goodWord);
for (int i = 0; i < 10000; i++) {
@@ -1246,8 +1239,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void run() throws Exception {
doSearching("AnInvalidQueryWhichShouldYieldNoResults");
- for (int i = 0; i < texts.length; i++) {
- String text = texts[i];
+ for (String text : texts) {
TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
HighlighterTest.this);
@@ -1716,8 +1708,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
ramDir = newDirectory();
IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
- for (int i = 0; i < texts.length; i++) {
- addDoc(writer, texts[i]);
+ for (String text : texts) {
+ addDoc(writer, text);
}
Document doc = new Document();
NumericField nfield = new NumericField(NUMERIC_FIELD_NAME, Store.YES, true);
@@ -1881,7 +1873,7 @@ final class SynonymTokenizer extends TokenStream {
}
public Highlighter getHighlighter(Query query, String fieldName, TokenStream stream, Formatter formatter, boolean expanMultiTerm) {
- Scorer scorer = null;
+ Scorer scorer;
if (mode == QUERY) {
scorer = new QueryScorer(query, fieldName);
if(!expanMultiTerm) {
diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
index 9feb9c727de..59a3b45746a 100644
--- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
+++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
@@ -64,8 +64,6 @@ public class InstantiatedIndexWriter implements Closeable {
private PrintStream infoStream = null;
- private int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
-
private final InstantiatedIndex index;
private final Analyzer analyzer;
@@ -431,9 +429,7 @@ public class InstantiatedIndexWriter implements Closeable {
};
/**
- * Adds a document to this index. If the document contains more than
- * {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
- * discarded.
+ * Adds a document to this index.
*/
public void addDocument(Document doc) throws IOException {
addDocument(doc, getAnalyzer());
@@ -441,9 +437,7 @@ public class InstantiatedIndexWriter implements Closeable {
/**
* Adds a document to this index, using the provided analyzer instead of the
- * value of {@link #getAnalyzer()}. If the document contains more than
- * {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
- * discarded.
+ * value of {@link #getAnalyzer()}.
*
* @param doc
* @param analyzer
@@ -555,9 +549,6 @@ public class InstantiatedIndexWriter implements Closeable {
}
tokens.add(token); // the vector will be built on commit.
fieldSetting.fieldLength++;
- if (fieldSetting.fieldLength > maxFieldLength) {
- break;
- }
}
tokenStream.end();
tokenStream.close();
@@ -666,14 +657,6 @@ public class InstantiatedIndexWriter implements Closeable {
addDocument(doc, analyzer);
}
- public int getMaxFieldLength() {
- return maxFieldLength;
- }
-
- public void setMaxFieldLength(int maxFieldLength) {
- this.maxFieldLength = maxFieldLength;
- }
-
public Similarity getSimilarity() {
return similarity;
}
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java
index bf4804ed1e3..9cfd56803ca 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java
@@ -22,6 +22,7 @@ import java.util.Date;
import java.util.List;
import java.util.ArrayList;
+import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
@@ -52,19 +53,21 @@ public class FieldNormModifier {
*/
public static void main(String[] args) throws IOException {
if (args.length < 3) {
- System.err.println("Usage: FieldNormModifier [field2] ...");
+ System.err.println("Usage: FieldNormModifier [field2] ...");
System.exit(1);
}
Similarity s = null;
- if (!args[1].equals("-n")) {
- try {
- s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
- } catch (Exception e) {
- System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
- e.printStackTrace(System.err);
- System.exit(1);
- }
+
+ if (args[1].equals("-d"))
+ args[1] = DefaultSimilarity.class.getName();
+
+ try {
+ s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
+ } catch (Exception e) {
+ System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
+ e.printStackTrace(System.err);
+ System.exit(1);
}
Directory d = FSDirectory.open(new File(args[0]));
@@ -142,11 +145,7 @@ public class FieldNormModifier {
for (int d = 0; d < termCounts.length; d++) {
if (delDocs == null || !delDocs.get(d)) {
- if (sim == null) {
- subReader.setNorm(d, fieldName, Similarity.encodeNorm(1.0f));
- } else {
- subReader.setNorm(d, fieldName, sim.encodeNormValue(sim.lengthNorm(fieldName, termCounts[d])));
- }
+ subReader.setNorm(d, fieldName, sim.encodeNormValue(sim.lengthNorm(fieldName, termCounts[d])));
}
}
}
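
With the `-n` branch removed above, FieldNormModifier always applies a concrete Similarity, and the new `-d` flag simply selects DefaultSimilarity. A minimal sketch of the equivalent programmatic call, assuming the (Directory, Similarity) constructor and reSetNorms(String) method shown for its LengthNormModifier predecessor below; the index path and field name are placeholders:

    import java.io.File;
    import org.apache.lucene.index.FieldNormModifier;
    import org.apache.lucene.search.DefaultSimilarity;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class ResetNormsExample {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(new File("/path/to/index")); // placeholder path
        // Equivalent of "-d" on the command line: recompute norms with DefaultSimilarity.
        FieldNormModifier fnm = new FieldNormModifier(dir, new DefaultSimilarity());
        fnm.reSetNorms("body"); // placeholder field name
        dir.close();
      }
    }
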
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
index 8526af82e1c..ce42d8bbc99 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
@@ -96,7 +96,7 @@ public class MultiPassIndexSplitter {
new WhitespaceAnalyzer(Version.LUCENE_CURRENT))
.setOpenMode(OpenMode.CREATE));
System.err.println("Writing part " + (i + 1) + " ...");
- w.addIndexes(new IndexReader[]{input});
+ w.addIndexes(input);
w.close();
}
System.err.println("Done.");
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java b/lucene/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java
deleted file mode 100644
index fecc5b06860..00000000000
--- a/lucene/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java
+++ /dev/null
@@ -1,144 +0,0 @@
-package org.apache.lucene.misc;
-
-/**
- * Copyright 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util.Bits;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Date;
-
-/**
- * Given a directory, a Similarity, and a list of fields, updates the
- * fieldNorms in place for every document using the Similarity.lengthNorm.
- *
- *
- * NOTE: This only works if you do not use field/document boosts in your
- * index.
- *
- *
- * @version $Id$
- * @deprecated Use {@link org.apache.lucene.index.FieldNormModifier}
- */
-@Deprecated
-public class LengthNormModifier {
-
- /**
- * Command Line Execution method.
- *
- *
- * Usage: LengthNormModifier /path/index package.SimilarityClassName field1 field2 ...
- *
- */
- public static void main(String[] args) throws IOException {
- if (args.length < 3) {
- System.err.println("Usage: LengthNormModifier [field2] ...");
- System.exit(1);
- }
-
- Similarity s = null;
- try {
- s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
- } catch (Exception e) {
- System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
- e.printStackTrace(System.err);
- }
-
- File index = new File(args[0]);
- Directory d = FSDirectory.open(index);
-
- LengthNormModifier lnm = new LengthNormModifier(d, s);
-
- for (int i = 2; i < args.length; i++) {
- System.out.print("Updating field: " + args[i] + " " + (new Date()).toString() + " ... ");
- lnm.reSetNorms(args[i]);
- System.out.println(new Date().toString());
- }
-
- d.close();
- }
-
-
- private Directory dir;
- private Similarity sim;
-
- /**
- * Constructor for code that wishes to use this class programmatically.
- *
- * @param d The Directory to modify
- * @param s The Similarity to use in reSetNorms
- */
- public LengthNormModifier(Directory d, Similarity s) {
- dir = d;
- sim = s;
- }
-
- /**
- * Resets the norms for the specified field.
- *
- *
- * Opens a new IndexReader on the Directory given to this instance,
- * modifies the norms using the Similarity given to this instance,
- * and closes the IndexReader.
- *
- *
- * @param field the field whose norms should be reset
- */
- public void reSetNorms(String field) throws IOException {
- String fieldName = StringHelper.intern(field);
- int[] termCounts = new int[0];
-
- IndexReader reader = IndexReader.open(dir, false);
- try {
-
- termCounts = new int[reader.maxDoc()];
- Bits delDocs = MultiFields.getDeletedDocs(reader);
- DocsEnum docs = null;
-
- Terms terms = MultiFields.getTerms(reader, field);
- if (terms != null) {
- TermsEnum termsEnum = terms.iterator();
- while(termsEnum.next() != null) {
- docs = termsEnum.docs(delDocs, docs);
- int doc;
- while ((doc = docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
- termCounts[doc] += docs.freq();
- }
- }
- }
-
- for (int d = 0; d < termCounts.length; d++) {
- if (!delDocs.get(d)) {
- byte norm = Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
- reader.setNorm(d, fieldName, norm);
- }
- }
- } finally {
- reader.close();
- }
- }
-
-}
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/BooleanFilter.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/BooleanFilter.java
index a514c380d7f..2e6868e5202 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/BooleanFilter.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/BooleanFilter.java
@@ -106,27 +106,15 @@ public class BooleanFilter extends Filter
}
if (res !=null)
- return finalResult(res, reader.maxDoc());
+ return res;
return DocIdSet.EMPTY_DOCIDSET;
}
- /** Provide a SortedVIntList when it is definitely smaller
- * than an OpenBitSet.
- * @deprecated Either use CachingWrapperFilter, or
- * switch to a different DocIdSet implementation yourself.
- * This method will be removed in Lucene 4.0
- */
- @Deprecated
- protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
- return result;
- }
-
/**
* Adds a new FilterClause to the Boolean Filter container
* @param filterClause A FilterClause object containing a Filter and an Occur parameter
*/
-
public void add(FilterClause filterClause)
{
if (filterClause.getOccur().equals(Occur.MUST)) {
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/ChainedFilter.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/ChainedFilter.java
index f06d0e23a06..e95b50660d8 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/ChainedFilter.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/ChainedFilter.java
@@ -149,18 +149,6 @@ public class ChainedFilter extends Filter
return result;
}
- /** Provide a SortedVIntList when it is definitely
- * smaller than an OpenBitSet
- * @deprecated Either use CachingWrapperFilter, or
- * switch to a different DocIdSet implementation yourself.
- * This method will be removed in Lucene 4.0
- **/
- @Deprecated
- protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
- return result;
- }
-
-
/**
* Delegates to each filter in the chain.
* @param reader IndexReader
@@ -175,7 +163,7 @@ public class ChainedFilter extends Filter
{
doChain(result, logic, chain[index[0]].getDocIdSet(reader));
}
- return finalResult(result, reader.maxDoc());
+ return result;
}
/**
@@ -195,7 +183,7 @@ public class ChainedFilter extends Filter
{
doChain(result, logic[index[0]], chain[index[0]].getDocIdSet(reader));
}
- return finalResult(result, reader.maxDoc());
+ return result;
}
@Override
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java
deleted file mode 100644
index 818a9497deb..00000000000
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java
+++ /dev/null
@@ -1,46 +0,0 @@
-package org.apache.lucene.search.regex;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
-
-/**
- * A SpanQuery version of {@link RegexQuery} allowing regular expression
- * queries to be nested within other SpanQuery subclasses.
- * @deprecated Use new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery()) instead.
- * This query will be removed in Lucene 4.0
- */
-@Deprecated
-public class SpanRegexQuery extends SpanMultiTermQueryWrapper implements RegexQueryCapable {
- private final RegexCapabilities regexImpl = new JavaUtilRegexCapabilities();
-
- public SpanRegexQuery(Term term) {
- super(new RegexQuery(term));
- }
-
- public Term getTerm() { return query.getTerm(); }
-
- public void setRegexImplementation(RegexCapabilities impl) {
- query.setRegexImplementation(impl);
- }
-
- public RegexCapabilities getRegexImplementation() {
- return query.getRegexImplementation();
- }
-}
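
The replacement named in the @deprecated note above is SpanMultiTermQueryWrapper around a RegexQuery, exactly as the updated tests below do. A minimal sketch of the migration for a nested span query; the field name and regex patterns are placeholders:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.regex.RegexQuery;
    import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
    import org.apache.lucene.search.spans.SpanNearQuery;
    import org.apache.lucene.search.spans.SpanQuery;

    public class SpanRegexMigration {
      // was: new SpanRegexQuery(new Term(field, regex))
      public static SpanQuery regexSpan(String field, String regex) {
        return new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery(new Term(field, regex)));
      }

      // Regex spans can still be nested inside other span queries, e.g. a SpanNearQuery.
      public static SpanQuery nearRegex(String field, String re1, String re2, int slop) {
        return new SpanNearQuery(
            new SpanQuery[] { regexSpan(field, re1), regexSpan(field, re2) }, slop, true);
      }
    }
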
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
index e0baef7687a..8fa1ba82469 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
@@ -17,6 +17,7 @@ package org.apache.lucene.search.regex;
* limitations under the License.
*/
+import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
@@ -73,10 +74,10 @@ public class TestRegexQuery extends LuceneTestCase {
}
private int spanRegexQueryNrHits(String regex1, String regex2, int slop, boolean ordered) throws Exception {
- SpanRegexQuery srq1 = new SpanRegexQuery( newTerm(regex1));
- SpanRegexQuery srq2 = new SpanRegexQuery( newTerm(regex2));
+ SpanQuery srq1 = new SpanMultiTermQueryWrapper(new RegexQuery(newTerm(regex1)));
+ SpanQuery srq2 = new SpanMultiTermQueryWrapper(new RegexQuery(newTerm(regex2)));
SpanNearQuery query = new SpanNearQuery( new SpanQuery[]{srq1, srq2}, slop, ordered);
-
+
return searcher.search(query, null, 1000).totalHits;
}
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
index 762478686ac..3ce218ca7dd 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
@@ -112,67 +112,6 @@ public class TestSpanRegexQuery extends LuceneTestCase {
indexStoreB.close();
}
- /** remove in lucene 4.0 */
- @Deprecated
- public void testSpanRegexOld() throws Exception {
- Directory directory = newDirectory();
- IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- // doc.add(newField("field", "the quick brown fox jumps over the lazy dog",
- // Field.Store.NO, Field.Index.ANALYZED));
- // writer.addDocument(doc);
- // doc = new Document();
- doc.add(newField("field", "auto update", Field.Store.NO,
- Field.Index.ANALYZED));
- writer.addDocument(doc);
- doc = new Document();
- doc.add(newField("field", "first auto update", Field.Store.NO,
- Field.Index.ANALYZED));
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(directory, true);
- SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "aut.*"));
- SpanFirstQuery sfq = new SpanFirstQuery(srq, 1);
- // SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {srq, stq}, 6,
- // true);
- int numHits = searcher.search(sfq, null, 1000).totalHits;
- assertEquals(1, numHits);
- searcher.close();
- directory.close();
- }
-
- /** remove in lucene 4.0 */
- @Deprecated
- public void testSpanRegexBugOld() throws CorruptIndexException, IOException {
- createRAMDirectories();
-
- SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "a.*"));
- SpanRegexQuery stq = new SpanRegexQuery(new Term("field", "b.*"));
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] { srq, stq }, 6,
- true);
-
- // 1. Search the same store which works
- IndexSearcher[] arrSearcher = new IndexSearcher[2];
- arrSearcher[0] = new IndexSearcher(indexStoreA, true);
- arrSearcher[1] = new IndexSearcher(indexStoreB, true);
- MultiSearcher searcher = new MultiSearcher(arrSearcher);
- int numHits = searcher.search(query, null, 1000).totalHits;
- arrSearcher[0].close();
- arrSearcher[1].close();
-
- // Will fail here
- // We expect 2 but only one matched
- // The rewriter function only write it once on the first IndexSearcher
- // So it's using term: a1 b1 to search on the second IndexSearcher
- // As a result, it won't match the document in the second IndexSearcher
- assertEquals(2, numHits);
- indexStoreA.close();
- indexStoreB.close();
- }
-
private void createRAMDirectories() throws CorruptIndexException,
LockObtainFailedException, IOException {
// creating a document to store
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorPipeline.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorPipeline.java
index 7e00e1a0d70..68d3638f8e1 100644
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorPipeline.java
+++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorPipeline.java
@@ -17,11 +17,7 @@ package org.apache.lucene.queryParser.core.processors;
* limitations under the License.
*/
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.ListIterator;
+import java.util.*;
import org.apache.lucene.queryParser.core.QueryNodeException;
import org.apache.lucene.queryParser.core.config.QueryConfigHandler;
@@ -96,23 +92,6 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor,
}
- /**
- * Adds a processor to the pipeline, it's always added to the end of the
- * pipeline.
- *
- * @deprecated this class now conforms to {@link List} interface, so use
- * {@link #add(QueryNodeProcessor)} instead
- *
- * @param processor the processor to be added
- */
- @Deprecated
- public void addProcessor(QueryNodeProcessor processor) {
- this.processors.add(processor);
-
- processor.setQueryConfigHandler(this.queryConfig);
-
- }
-
/**
* For reference about this method check:
* {@link QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)}.
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java
deleted file mode 100644
index 46eb864a250..00000000000
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java
+++ /dev/null
@@ -1,269 +0,0 @@
-package org.apache.lucene.queryParser.standard;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Map;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Query;
-
-/**
- * This class behaves as the lucene 2.4 MultiFieldQueryParser class, but uses the new
- * query parser interface instead of the old one.
- *
- * This class should be used when the new query parser features are needed and
- * also keep at the same time the old query parser interface.
- *
- * @deprecated this class will be removed soon, it's a temporary class to be
- * used along the transition from the old query parser to the new
- * one
- */
-@Deprecated
-public class MultiFieldQueryParserWrapper extends QueryParserWrapper {
-
- /**
- * Creates a MultiFieldQueryParser. Allows passing of a map with term to
- * Boost, and the boost to apply to each term.
- *
- *
- * It will, when parse(String query) is called, construct a query like this
- * (assuming the query consists of two terms and you specify the two fields
- * <code>title</code> and <code>body</code>):
- *
- *
- *
- * (title:term1 body:term1) (title:term2 body:term2)
- *
- *
- *
- * When setDefaultOperator(AND_OPERATOR) is set, the result will be:
- *
- *
- *
- * +(title:term1 body:term1) +(title:term2 body:term2)
- *
- *
- *
- * When you pass a boost (title=>5 body=>10) you can get
- *
- *
- *
- * +(title:term1^5.0 body:term1^10.0) +(title:term2^5.0 body:term2^10.0)
- *
- *
- *
- * In other words, all the query's terms must appear, but it doesn't matter in
- * what fields they appear.
- *
- */
-public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer, Map boosts) {
- this(fields, analyzer);
- StandardQueryParser qpHelper = getQueryParserHelper();
-
- qpHelper.setMultiFields(fields);
- qpHelper.setFieldsBoost(boosts);
-
- }
-
- /**
- * Creates a MultiFieldQueryParser.
- *
- *
- * It will, when parse(String query) is called, construct a query like this
- * (assuming the query consists of two terms and you specify the two fields
- * <code>title</code> and <code>body</code>):
- *
- *
- *
- * (title:term1 body:term1) (title:term2 body:term2)
- *
- *
- *
- * When setDefaultOperator(AND_OPERATOR) is set, the result will be:
- *
- *
- *
- * +(title:term1 body:term1) +(title:term2 body:term2)
- *
- *
- *
- * In other words, all the query's terms must appear, but it doesn't matter in
- * what fields they appear.
- *
- */
- public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer) {
- super(null, analyzer);
-
- StandardQueryParser qpHelper = getQueryParserHelper();
- qpHelper.setAnalyzer(analyzer);
-
- qpHelper.setMultiFields(fields);
- }
-
- /**
- * Parses a query which searches on the fields specified.
- *
- * If x fields are specified, this effectively constructs:
- *
- *
- * <code>
- * (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
- * </code>
- *
- *
- * @param queries
- * Query strings to parse
- * @param fields
- * Fields to search on
- * @param analyzer
- * Analyzer to use
- * @throws ParseException
- * if query parsing fails
- * @throws IllegalArgumentException
- * if the length of the queries array differs from the length of the
- * fields array
- */
- public static Query parse(String[] queries, String[] fields, Analyzer analyzer)
- throws ParseException {
- if (queries.length != fields.length)
- throw new IllegalArgumentException("queries.length != fields.length");
- BooleanQuery bQuery = new BooleanQuery();
- for (int i = 0; i < fields.length; i++) {
- QueryParserWrapper qp = new QueryParserWrapper(fields[i], analyzer);
- Query q = qp.parse(queries[i]);
- if (q != null && // q never null, just being defensive
- (!(q instanceof BooleanQuery) || ((BooleanQuery) q).getClauses().length > 0)) {
- bQuery.add(q, BooleanClause.Occur.SHOULD);
- }
- }
- return bQuery;
- }
-
- /**
- * Parses a query, searching on the fields specified. Use this if you need to
- * specify certain fields as required, and others as prohibited.
- *
- *
- *
- * Usage:
- * <code>
- * String[] fields = {"filename", "contents", "description"};
- * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
- * BooleanClause.Occur.MUST,
- * BooleanClause.Occur.MUST_NOT};
- * MultiFieldQueryParser.parse("query", fields, flags, analyzer);
- * </code>
- *
- *
- * The code above would construct a query:
- *
- *
- * <code>
- * (filename:query) +(contents:query) -(description:query)
- * </code>
- *
- *
- * @param query
- * Query string to parse
- * @param fields
- * Fields to search on
- * @param flags
- * Flags describing the fields
- * @param analyzer
- * Analyzer to use
- * @throws ParseException
- * if query parsing fails
- * @throws IllegalArgumentException
- * if the length of the fields array differs from the length of the
- * flags array
- */
- public static Query parse(String query, String[] fields,
- BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
- if (fields.length != flags.length)
- throw new IllegalArgumentException("fields.length != flags.length");
- BooleanQuery bQuery = new BooleanQuery();
- for (int i = 0; i < fields.length; i++) {
- QueryParserWrapper qp = new QueryParserWrapper(fields[i], analyzer);
- Query q = qp.parse(query);
- if (q != null && // q never null, just being defensive
- (!(q instanceof BooleanQuery) || ((BooleanQuery) q).getClauses().length > 0)) {
- bQuery.add(q, flags[i]);
- }
- }
- return bQuery;
- }
-
- /**
- * Parses a query, searching on the fields specified. Use this if you need to
- * specify certain fields as required, and others as prohibited.
- *
- *
- *
- * Usage:
- * <code>
- * String[] query = {"query1", "query2", "query3"};
- * String[] fields = {"filename", "contents", "description"};
- * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
- * BooleanClause.Occur.MUST,
- * BooleanClause.Occur.MUST_NOT};
- * MultiFieldQueryParser.parse(query, fields, flags, analyzer);
- * </code>
- *
- *
- * The code above would construct a query:
- *
- *
- * <code>
- * (filename:query1) +(contents:query2) -(description:query3)
- * </code>
- *
- *
- * @param queries
- * Query strings to parse
- * @param fields
- * Fields to search on
- * @param flags
- * Flags describing the fields
- * @param analyzer
- * Analyzer to use
- * @throws ParseException
- * if query parsing fails
- * @throws IllegalArgumentException
- * if the lengths of the queries, fields, and flags arrays differ
- */
- public static Query parse(String[] queries, String[] fields,
- BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
- if (!(queries.length == fields.length && queries.length == flags.length))
- throw new IllegalArgumentException(
- "queries, fields, and flags array have have different length");
- BooleanQuery bQuery = new BooleanQuery();
- for (int i = 0; i < fields.length; i++) {
- QueryParserWrapper qp = new QueryParserWrapper(fields[i], analyzer);
- Query q = qp.parse(queries[i]);
- if (q != null && // q never null, just being defensive
- (!(q instanceof BooleanQuery) || ((BooleanQuery) q).getClauses().length > 0)) {
- bQuery.add(q, flags[i]);
- }
- }
- return bQuery;
- }
-
-}
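
Note: with MultiFieldQueryParserWrapper gone, multi-field parsing is configured on StandardQueryParser directly, which is exactly what the deleted constructor did through setMultiFields and setFieldsBoost. A minimal migration sketch, assuming illustrative field names, boosts, query text, and a StandardAnalyzer; the raw Map mirrors the removed constructor's parameter, and the class name MultiFieldMigrationSketch is hypothetical:

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.standard.StandardQueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class MultiFieldMigrationSketch {
  public static void main(String[] args) throws Exception {
    String[] fields = { "title", "body" };      // assumed field names
    Map boosts = new HashMap();                 // raw Map, as in the removed constructor
    boosts.put("title", Float.valueOf(5.0f));
    boosts.put("body", Float.valueOf(10.0f));

    StandardQueryParser qpHelper = new StandardQueryParser();
    qpHelper.setAnalyzer(new StandardAnalyzer(Version.LUCENE_40)); // assumed analyzer/version
    qpHelper.setMultiFields(fields);            // same call the removed wrapper made
    qpHelper.setFieldsBoost(boosts);            // same call the removed wrapper made

    // With the default OR operator this should yield roughly:
    // (title:term1^5.0 body:term1^10.0) (title:term2^5.0 body:term2^10.0)
    Query query = qpHelper.parse("term1 term2", null); // null default field, as the wrapper used
    System.out.println(query);
  }
}

The null default field matches what the removed wrapper passed to its superclass, so field-less terms are expanded across the configured fields rather than bound to a single field.
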
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java
deleted file mode 100644
index 839cfa3aae5..00000000000
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java
+++ /dev/null
@@ -1,491 +0,0 @@
-package org.apache.lucene.queryParser.standard;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.text.Collator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.document.DateTools;
-import org.apache.lucene.document.DateTools.Resolution;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.queryParser.core.QueryNodeException;
-import org.apache.lucene.queryParser.core.config.FieldConfig;
-import org.apache.lucene.queryParser.core.config.QueryConfigHandler;
-import org.apache.lucene.queryParser.core.nodes.QueryNode;
-import org.apache.lucene.queryParser.core.parser.SyntaxParser;
-import org.apache.lucene.queryParser.core.processors.QueryNodeProcessor;
-import org.apache.lucene.queryParser.standard.builders.StandardQueryBuilder;
-import org.apache.lucene.queryParser.standard.builders.StandardQueryTreeBuilder;
-import org.apache.lucene.queryParser.standard.config.AllowLeadingWildcardAttribute;
-import org.apache.lucene.queryParser.standard.config.AnalyzerAttribute;
-import org.apache.lucene.queryParser.standard.config.DateResolutionAttribute;
-import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute;
-import org.apache.lucene.queryParser.standard.config.DefaultPhraseSlopAttribute;
-import org.apache.lucene.queryParser.standard.config.LocaleAttribute;
-import org.apache.lucene.queryParser.standard.config.LowercaseExpandedTermsAttribute;
-import org.apache.lucene.queryParser.standard.config.MultiTermRewriteMethodAttribute;
-import org.apache.lucene.queryParser.standard.config.PositionIncrementsAttribute;
-import org.apache.lucene.queryParser.standard.config.RangeCollatorAttribute;
-import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler;
-import org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser;
-import org.apache.lucene.queryParser.standard.processors.StandardQueryNodeProcessorPipeline;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.FuzzyQuery;
-import org.apache.lucene.search.MultiTermQuery;
-import org.apache.lucene.search.Query;
-
-/**
- * This class performs the query parsing using the new query parser
- * implementation, but keeps the old {@link QueryParser} API.
- *
- * This class should be used when the new query parser features and the old
- * {@link QueryParser} API are needed at the same time.
- *
- * @deprecated this class will be removed soon; it's a temporary class to be
- *             used during the transition from the old query parser to the new
- *             one
- */
-@Deprecated
-public class QueryParserWrapper {
-
- /**
- * The default operator for parsing queries. Use
- * {@link QueryParserWrapper#setDefaultOperator} to change it.
- */
- static public enum Operator { OR, AND }
-
- // the nested class:
- /** Alternative form of QueryParser.Operator.AND */
- public static final Operator AND_OPERATOR = Operator.AND;
-
- /** Alternative form of QueryParser.Operator.OR */
- public static final Operator OR_OPERATOR = Operator.OR;
-
- /**
- * Returns a String where those characters that QueryParser expects to be
- * escaped are escaped by a preceding <code>\</code>.
- */
- public static String escape(String s) {
- StringBuilder sb = new StringBuilder();
- for (int i = 0; i < s.length(); i++) {
- char c = s.charAt(i);
- // These characters are part of the query syntax and must be escaped
- if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')'
- || c == ':' || c == '^' || c == '[' || c == ']' || c == '\"'
- || c == '{' || c == '}' || c == '~' || c == '*' || c == '?'
- || c == '|' || c == '&') {
- sb.append('\\');
- }
- sb.append(c);
- }
- return sb.toString();
- }
-
- private SyntaxParser syntaxParser = new StandardSyntaxParser();
-
- private StandardQueryConfigHandler config;
-
- private StandardQueryParser qpHelper;
-
- private QueryNodeProcessor processorPipeline;
-
- private StandardQueryBuilder builder = new StandardQueryTreeBuilder();
-
- private String defaultField;
-
- public QueryParserWrapper(String defaultField, Analyzer analyzer) {
- this.defaultField = defaultField;
-
- this.qpHelper = new StandardQueryParser();
-
- this.config = (StandardQueryConfigHandler) qpHelper.getQueryConfigHandler();
-
- this.qpHelper.setAnalyzer(analyzer);
-
- this.processorPipeline = new StandardQueryNodeProcessorPipeline(this.config);
-
- }
-
- StandardQueryParser getQueryParserHelper() {
- return qpHelper;
- }
-
- public String getField() {
- return this.defaultField;
- }
-
- public Analyzer getAnalyzer() {
-
- if (this.config != null
- && this.config.hasAttribute(AnalyzerAttribute.class)) {
-
- return this.config.getAttribute(AnalyzerAttribute.class).getAnalyzer();
-
- }
-
- return null;
-
- }
-
- /**
- * Sets the {@link StandardQueryBuilder} used to generate a {@link Query}
- * object from the parsed and processed query node tree.
- *
- * @param builder the builder
- */
- public void setQueryBuilder(StandardQueryBuilder builder) {
- this.builder = builder;
- }
-
- /**
- * Sets the {@link QueryNodeProcessor} used to process the query node tree
- * generated by the
- * {@link org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser}.
- *
- * @param processor the processor
- */
- public void setQueryProcessor(QueryNodeProcessor processor) {
- this.processorPipeline = processor;
- this.processorPipeline.setQueryConfigHandler(this.config);
-
- }
-
- /**
- * Sets the {@link QueryConfigHandler} used by the {@link QueryNodeProcessor}
- * set to this object.
- *
- * @param queryConfig the query config handler
- */
- public void setQueryConfig(StandardQueryConfigHandler queryConfig) {
- this.config = queryConfig;
-
- if (this.processorPipeline != null) {
- this.processorPipeline.setQueryConfigHandler(this.config);
- }
-
- }
-
- /**
- * Returns the query config handler used by this query parser
- *
- * @return the query config handler
- */
- public QueryConfigHandler getQueryConfigHandler() {
- return this.config;
- }
-
- /**
- * Returns {@link QueryNodeProcessor} used to process the query node tree
- * generated by the
- * {@link org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser}.
- *
- * @return the query processor
- */
- public QueryNodeProcessor getQueryProcessor() {
- return this.processorPipeline;
- }
-
- public ParseException generateParseException() {
- return null;
- }
-
- public boolean getAllowLeadingWildcard() {
-
- if (this.config != null
- && this.config.hasAttribute(AllowLeadingWildcardAttribute.class)) {
-
- return this.config.getAttribute(AllowLeadingWildcardAttribute.class)
- .isAllowLeadingWildcard();
-
- }
-
- return false;
-
- }
-
- public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod() {
-
- if (this.config != null
- && this.config.hasAttribute(MultiTermRewriteMethodAttribute.class)) {
-
- return this.config.getAttribute(MultiTermRewriteMethodAttribute.class)
- .getMultiTermRewriteMethod();
-
- }
-
- return MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
-
- }
-
- public Resolution getDateResolution(String fieldName) {
-
- if (this.config != null) {
- FieldConfig fieldConfig = this.config.getFieldConfig(fieldName);
-
- if (fieldConfig != null) {
-
- if (this.config.hasAttribute(DateResolutionAttribute.class)) {
-
- return this.config.getAttribute(DateResolutionAttribute.class)
- .getDateResolution();
-
- }
-
- }
-
- }
-
- return null;
-
- }
-
- public boolean getEnablePositionIncrements() {
-
- if (this.config != null
- && this.config.hasAttribute(PositionIncrementsAttribute.class)) {
-
- return this.config.getAttribute(PositionIncrementsAttribute.class)
- .isPositionIncrementsEnabled();
-
- }
-
- return false;
-
- }
-
- public float getFuzzyMinSim() {
- return FuzzyQuery.defaultMinSimilarity;
- }
-
- public int getFuzzyPrefixLength() {
- return FuzzyQuery.defaultPrefixLength;
- }
-
- public Locale getLocale() {
-
- if (this.config != null && this.config.hasAttribute(LocaleAttribute.class)) {
- return this.config.getAttribute(LocaleAttribute.class).getLocale();
- }
-
- return Locale.getDefault();
-
- }
-
- public boolean getLowercaseExpandedTerms() {
-
- if (this.config != null
- && this.config.hasAttribute(LowercaseExpandedTermsAttribute.class)) {
-
- return this.config.getAttribute(LowercaseExpandedTermsAttribute.class)
- .isLowercaseExpandedTerms();
-
- }
-
- return true;
-
- }
-
- public int getPhraseSlop() {
-
- if (this.config != null
- && this.config.hasAttribute(DefaultPhraseSlopAttribute.class)) {
-
- return this.config.getAttribute(DefaultPhraseSlopAttribute.class)
- .getDefaultPhraseSlop();
-
- }
-
- return 0;
-
- }
-
- public Collator getRangeCollator() {
-
- if (this.config != null
- && this.config.hasAttribute(RangeCollatorAttribute.class)) {
-
- return this.config.getAttribute(RangeCollatorAttribute.class)
- .getRangeCollator();
-
- }
-
- return null;
-
- }
-
- public boolean getUseOldRangeQuery() {
- if (getMultiTermRewriteMethod() == MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE) {
- return true;
- } else {
- return false;
- }
- }
-
- public Query parse(String query) throws ParseException {
-
- try {
- QueryNode queryTree = this.syntaxParser.parse(query, getField());
- queryTree = this.processorPipeline.process(queryTree);
- return this.builder.build(queryTree);
-
- } catch (QueryNodeException e) {
- throw new ParseException("parse exception");
- }
-
- }
-
- public void setAllowLeadingWildcard(boolean allowLeadingWildcard) {
- this.qpHelper.setAllowLeadingWildcard(allowLeadingWildcard);
- }
-
- public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method) {
- this.qpHelper.setMultiTermRewriteMethod(method);
- }
-
- public void setDateResolution(Resolution dateResolution) {
- this.qpHelper.setDateResolution(dateResolution);
- }
-
- private Map dateRes = new HashMap();
-
- public void setDateResolution(String fieldName, Resolution dateResolution) {
- dateRes.put(fieldName, dateResolution);
- this.qpHelper.setDateResolution(dateRes);
- }
-
- public void setDefaultOperator(Operator op) {
-
- this.qpHelper
- .setDefaultOperator(OR_OPERATOR.equals(op) ? org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator.OR
- : org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator.AND);
-
- }
-
- public Operator getDefaultOperator() {
-
- if (this.config != null
- && this.config.hasAttribute(DefaultOperatorAttribute.class)) {
-
- return (this.config.getAttribute(DefaultOperatorAttribute.class)
- .getOperator() == org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator.AND) ? AND_OPERATOR
- : OR_OPERATOR;
-
- }
-
- return OR_OPERATOR;
-
- }
-
- public void setEnablePositionIncrements(boolean enable) {
- this.qpHelper.setEnablePositionIncrements(enable);
- }
-
- public void setFuzzyMinSim(float fuzzyMinSim) {
- // TODO Auto-generated method stub
-
- }
-
- public void setFuzzyPrefixLength(int fuzzyPrefixLength) {
- // TODO Auto-generated method stub
-
- }
-
- public void setLocale(Locale locale) {
- this.qpHelper.setLocale(locale);
- }
-
- public void setLowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
- this.qpHelper.setLowercaseExpandedTerms(lowercaseExpandedTerms);
- }
-
- public void setPhraseSlop(int phraseSlop) {
- this.qpHelper.setDefaultPhraseSlop(phraseSlop);
- }
-
- public void setRangeCollator(Collator rc) {
- this.qpHelper.setRangeCollator(rc);
- }
-
- public void setUseOldRangeQuery(boolean useOldRangeQuery) {
- if (useOldRangeQuery) {
- setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
- } else {
- setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
- }
- }
-
- protected Query getPrefixQuery(String field, String termStr)
- throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- protected Query getWildcardQuery(String field, String termStr)
- throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- protected Query getFuzzyQuery(String field, String termStr,
- float minSimilarity) throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- /** @deprecated Use {@link #getFieldQuery(String, String, boolean)} instead */
- @Deprecated
- protected Query getFieldQuery(String field, String queryText) throws ParseException {
- return getFieldQuery(field, queryText, true);
- }
-
- /**
- * @exception ParseException throw in overridden method to disallow
- */
- protected Query getFieldQuery(String field, String queryText, boolean quoted)
- throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- protected Query getBooleanQuery(List clauses, boolean disableCoord)
- throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- /**
- * Base implementation delegates to {@link #getFieldQuery(String,String)}.
- * This method may be overridden, for example, to return a SpanNearQuery
- * instead of a PhraseQuery.
- *
- * @exception ParseException throw in overridden method to disallow
- */
- protected Query getFieldQuery(String field, String queryText, int slop)
- throws ParseException {
- throw new UnsupportedOperationException();
- }
-
- /**
- * @exception ParseException throw in overridden method to disallow
- */
- protected Query getRangeQuery(String field, String part1, String part2,
- boolean inclusive) throws ParseException {
- throw new UnsupportedOperationException();
- }
-
-}
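
Note: the removed QueryParserWrapper#parse chained three stages: a StandardSyntaxParser, a StandardQueryNodeProcessorPipeline built from the parser's config handler, and a StandardQueryTreeBuilder. A minimal sketch of driving those stages directly, assuming an illustrative query, field, and analyzer; the class name PipelineSketch is hypothetical:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.core.nodes.QueryNode;
import org.apache.lucene.queryParser.standard.StandardQueryParser;
import org.apache.lucene.queryParser.standard.builders.StandardQueryBuilder;
import org.apache.lucene.queryParser.standard.builders.StandardQueryTreeBuilder;
import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler;
import org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser;
import org.apache.lucene.queryParser.standard.processors.StandardQueryNodeProcessorPipeline;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class PipelineSketch {
  public static void main(String[] args) throws Exception {
    // Reuse StandardQueryParser's config handler, as the removed wrapper did.
    StandardQueryParser qpHelper = new StandardQueryParser();
    qpHelper.setAnalyzer(new StandardAnalyzer(Version.LUCENE_40)); // assumed analyzer/version
    StandardQueryConfigHandler config =
        (StandardQueryConfigHandler) qpHelper.getQueryConfigHandler();

    StandardSyntaxParser syntaxParser = new StandardSyntaxParser();
    StandardQueryNodeProcessorPipeline pipeline =
        new StandardQueryNodeProcessorPipeline(config);
    StandardQueryBuilder builder = new StandardQueryTreeBuilder();

    // 1) parse the syntax, 2) run the processor pipeline, 3) build the Query,
    // mirroring the body of the removed parse(String) method.
    QueryNode tree = syntaxParser.parse("title:lucene", "title"); // assumed query and field
    tree = pipeline.process(tree);
    Query query = builder.build(tree);
    System.out.println(query);
  }
}
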
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/package.html b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/package.html
index 27ba181f295..5b5075f8350 100644
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/package.html
+++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/package.html
@@ -39,12 +39,5 @@ are used to reproduce the same behavior as the old query parser.
Check org.apache.lucene.queryParser.standard.StandardQueryParser to quick start using the Lucene query parser.
-
-There are 2 wrapper classes that extend QueryParser and MultiFieldQueryParser.
-The classes internally implement the new query parser structure. These 2
-classes are deprecated and should only be used when there is a need to use the
-old query parser interface.
-
-
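
Note: the trimmed package documentation now points solely at org.apache.lucene.queryParser.standard.StandardQueryParser. A minimal quick-start sketch, assuming an illustrative field, query, and analyzer; the DefaultOperatorAttribute.Operator.AND value is the same one the removed wrapper's setDefaultOperator(AND_OPERATOR) mapped to, and the class name QuickStartSketch is hypothetical:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.standard.StandardQueryParser;
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class QuickStartSketch {
  public static void main(String[] args) throws Exception {
    StandardQueryParser parser = new StandardQueryParser();
    parser.setAnalyzer(new StandardAnalyzer(Version.LUCENE_40)); // assumed analyzer/version

    // Require all terms, the equivalent of the removed wrappers'
    // setDefaultOperator(AND_OPERATOR).
    parser.setDefaultOperator(DefaultOperatorAttribute.Operator.AND);

    Query query = parser.parse("apache lucene", "contents"); // assumed query and field
    System.out.println(query); // expected along the lines of: +contents:apache +contents:lucene
  }
}
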