LUCENE-2185: add @Deprecated annotations

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@895342 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2010-01-03 10:31:42 +00:00
parent cdac1f7113
commit d22b7a98cd
52 changed files with 128 additions and 0 deletions

View File

@ -65,6 +65,7 @@ public final class ArabicAnalyzer extends StopwordAnalyzerBase {
* @deprecated use {@link WordlistLoader#getWordSet(File, String)} directly
*/
// TODO make this private
@Deprecated
public static final String STOPWORDS_COMMENT = "#";
/**
@ -116,6 +117,7 @@ public final class ArabicAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
*/
@Deprecated
public ArabicAnalyzer( Version matchVersion, String... stopwords ) {
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords ));
}
@ -124,6 +126,7 @@ public final class ArabicAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
*/
@Deprecated
public ArabicAnalyzer( Version matchVersion, Hashtable<?,?> stopwords ) {
this(matchVersion, stopwords.keySet());
}
@ -132,6 +135,7 @@ public final class ArabicAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words. Lines can be commented out using {@link #STOPWORDS_COMMENT}
* @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
*/
@Deprecated
public ArabicAnalyzer( Version matchVersion, File stopwords ) throws IOException {
this(matchVersion, WordlistLoader.getWordSet( stopwords, STOPWORDS_COMMENT));
}
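The same migration applies to most analyzers touched by this commit: the String.../Hashtable/File constructors are deprecated in favour of the Version-plus-Set constructor named in each @deprecated tag. A minimal sketch of that move, not part of the commit itself (class name and stop words are made up):

import java.util.Set;

import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.util.Version;

public class ArabicAnalyzerMigration {
  public static void main(String[] args) {
    // Before (now @Deprecated): varargs stop words
    // ArabicAnalyzer old = new ArabicAnalyzer(Version.LUCENE_30, "stopone", "stoptwo");

    // After, as the @deprecated javadoc suggests: build a Set first
    Set<?> stopwords = StopFilter.makeStopSet(Version.LUCENE_30, "stopone", "stoptwo");
    ArabicAnalyzer analyzer = new ArabicAnalyzer(Version.LUCENE_30, stopwords);
    System.out.println(analyzer.getClass().getSimpleName());
  }
}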

View File

@ -59,6 +59,7 @@ public final class BulgarianAnalyzer extends StopwordAnalyzerBase {
* @deprecated use {@link WordlistLoader#getWordSet(File, String)} directly
*/
//TODO make this private
@Deprecated
public static final String STOPWORDS_COMMENT = "#";
/**

View File

@ -58,6 +58,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
* @deprecated use {@link #getDefaultStopSet()} instead
*/
// TODO make this private in 3.1
@Deprecated
public final static String[] BRAZILIAN_STOP_WORDS = {
"a","ainda","alem","ambas","ambos","antes",
"ao","aonde","aos","apos","aquele","aqueles",
@ -138,6 +139,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
*/
@Deprecated
public BrazilianAnalyzer(Version matchVersion, String... stopwords) {
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
}
@ -146,6 +148,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
*/
@Deprecated
public BrazilianAnalyzer(Version matchVersion, Map<?,?> stopwords) {
this(matchVersion, stopwords.keySet());
}
@ -154,6 +157,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
*/
@Deprecated
public BrazilianAnalyzer(Version matchVersion, File stopwords)
throws IOException {
this(matchVersion, WordlistLoader.getWordSet(stopwords));
@ -163,6 +167,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from an array of Strings.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable( String... exclusionlist ) {
excltable = StopFilter.makeStopSet( matchVersion, exclusionlist );
setPreviousTokenStream(null); // force a new stemmer to be created
@ -171,6 +176,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from a {@link Map}.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable( Map<?,?> exclusionlist ) {
excltable = new HashSet<Object>(exclusionlist.keySet());
setPreviousTokenStream(null); // force a new stemmer to be created
@ -179,6 +185,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from the words contained in the given file.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable( File exclusionlist ) throws IOException {
excltable = WordlistLoader.getWordSet( exclusionlist );
setPreviousTokenStream(null); // force a new stemmer to be created
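Besides the constructors, the mutable setStemExclusionTable(...) setters are deprecated here in favour of passing the exclusion set to BrazilianAnalyzer(Version, Set, Set). A hedged sketch of that migration (word lists and class name are placeholders; getDefaultStopSet() is assumed to return the built-in stop set, as the @deprecated reference above suggests):

import java.util.Set;

import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
import org.apache.lucene.util.Version;

public class BrazilianMigration {
  public static void main(String[] args) {
    Version v = Version.LUCENE_30;

    // Before (now @Deprecated): mutate the analyzer after construction
    // BrazilianAnalyzer a = new BrazilianAnalyzer(v);
    // a.setStemExclusionTable("exemplo");

    // After, following the @deprecated javadoc: hand both sets to the constructor
    Set<?> stopwords = BrazilianAnalyzer.getDefaultStopSet();
    Set<?> exclusions = StopFilter.makeStopSet(v, "exemplo");
    BrazilianAnalyzer analyzer = new BrazilianAnalyzer(v, stopwords, exclusions);
    System.out.println(analyzer.getClass().getSimpleName());
  }
}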

View File

@ -45,6 +45,7 @@ public final class CJKAnalyzer extends StopwordAnalyzerBase {
*/
// TODO make this final in 3.1 -
// this might be revised and merged with StopFilter stop words too
@Deprecated
public final static String[] STOP_WORDS = {
"a", "and", "are", "as", "at", "be",
"but", "by", "for", "if", "in",
@ -99,6 +100,7 @@ public final class CJKAnalyzer extends StopwordAnalyzerBase {
* @param stopWords stop word array
* @deprecated use {@link #CJKAnalyzer(Version, Set)} instead
*/
@Deprecated
public CJKAnalyzer(Version matchVersion, String... stopWords) {
super(matchVersion, StopFilter.makeStopSet(matchVersion, stopWords));
}

View File

@ -80,6 +80,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
/**
* @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, String[], int, int, int, boolean)} instead
*/
@Deprecated
protected CompoundWordTokenFilterBase(TokenStream input, String[] dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
this(Version.LUCENE_30, input, makeDictionary(dictionary),minWordSize,minSubwordSize,maxSubwordSize, onlyLongestMatch);
}
@ -87,6 +88,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
/**
* @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, String[], boolean)} instead
*/
@Deprecated
protected CompoundWordTokenFilterBase(TokenStream input, String[] dictionary, boolean onlyLongestMatch) {
this(Version.LUCENE_30, input, makeDictionary(dictionary),DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, onlyLongestMatch);
}
@ -94,6 +96,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
/**
* @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, Set, boolean)} instead
*/
@Deprecated
protected CompoundWordTokenFilterBase(TokenStream input, Set<?> dictionary, boolean onlyLongestMatch) {
this(Version.LUCENE_30, input, dictionary,DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, onlyLongestMatch);
}
@ -101,6 +104,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
/**
* @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, String[])} instead
*/
@Deprecated
protected CompoundWordTokenFilterBase(TokenStream input, String[] dictionary) {
this(Version.LUCENE_30, input, makeDictionary(dictionary),DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, false);
}
@ -108,6 +112,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
/**
* @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, Set)} instead
*/
@Deprecated
protected CompoundWordTokenFilterBase(TokenStream input, Set<?> dictionary) {
this(Version.LUCENE_30, input, dictionary,DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, false);
}
@ -115,6 +120,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
/**
* @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, Set, int, int, int, boolean)} instead
*/
@Deprecated
protected CompoundWordTokenFilterBase(TokenStream input, Set<?> dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
this(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
}

View File

@ -46,6 +46,7 @@ public class DictionaryCompoundWordTokenFilter extends CompoundWordTokenFilterBa
* @param onlyLongestMatch Add only the longest matching subword to the stream
* @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, String[], int, int, int, boolean)} instead
*/
@Deprecated
public DictionaryCompoundWordTokenFilter(TokenStream input, String[] dictionary,
int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
super(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
@ -58,6 +59,7 @@ public class DictionaryCompoundWordTokenFilter extends CompoundWordTokenFilterBa
* @param dictionary the word dictionary to match against
* @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, String[])} instead
*/
@Deprecated
public DictionaryCompoundWordTokenFilter(TokenStream input, String[] dictionary) {
super(Version.LUCENE_30, input, dictionary);
}
@ -70,6 +72,7 @@ public class DictionaryCompoundWordTokenFilter extends CompoundWordTokenFilterBa
* lower case strings.
* @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, Set)} instead
*/
@Deprecated
public DictionaryCompoundWordTokenFilter(TokenStream input, Set dictionary) {
super(Version.LUCENE_30, input, dictionary);
}
@ -86,6 +89,7 @@ public class DictionaryCompoundWordTokenFilter extends CompoundWordTokenFilterBa
* @param onlyLongestMatch Add only the longest matching subword to the stream
* @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, Set, int, int, int, boolean)} instead
*/
@Deprecated
public DictionaryCompoundWordTokenFilter(TokenStream input, Set dictionary,
int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
super(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
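The compound-word constructors without a Version pin behaviour to Version.LUCENE_30 internally; the replacements take the Version explicitly. A rough sketch, assuming KeywordTokenizer as a convenient single-token source (dictionary and input are made up):

import java.io.StringReader;

import org.apache.lucene.analysis.KeywordTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter;
import org.apache.lucene.util.Version;

public class CompoundMigration {
  public static void main(String[] args) {
    String[] dictionary = { "soft", "ball" };
    TokenStream source = new KeywordTokenizer(new StringReader("softball"));

    // Before (now @Deprecated): no Version argument, LUCENE_30 assumed internally
    // TokenStream old = new DictionaryCompoundWordTokenFilter(source, dictionary);

    // After, as the @deprecated javadoc suggests: state the Version up front
    TokenStream filter =
        new DictionaryCompoundWordTokenFilter(Version.LUCENE_30, source, dictionary);
    System.out.println(filter.getClass().getSimpleName());
  }
}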

View File

@ -167,6 +167,7 @@ public class HyphenationCompoundWordTokenFilter extends
* @param onlyLongestMatch Add only the longest matching subword to the stream
* @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, String[], int, int, int, boolean)} instead.
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
HyphenationTree hyphenator, String[] dictionary, int minWordSize,
int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
@ -182,6 +183,7 @@ public class HyphenationCompoundWordTokenFilter extends
* @param dictionary the word dictionary to match against
* @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, String[])} instead.
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
HyphenationTree hyphenator, String[] dictionary) {
this(Version.LUCENE_30, input, hyphenator, makeDictionary(dictionary), DEFAULT_MIN_WORD_SIZE,
@ -197,6 +199,7 @@ public class HyphenationCompoundWordTokenFilter extends
* lower case strings.
* @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, Set)} instead.
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
HyphenationTree hyphenator, Set dictionary) {
this(Version.LUCENE_30, input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
@ -218,6 +221,7 @@ public class HyphenationCompoundWordTokenFilter extends
* @param onlyLongestMatch Add only the longest matching subword to the stream
* @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, Set, int, int, int, boolean)} instead.
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
HyphenationTree hyphenator, Set dictionary, int minWordSize,
int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {

View File

@ -62,6 +62,7 @@ public final class CzechAnalyzer extends ReusableAnalyzerBase {
* @deprecated use {@link #getDefaultStopSet()} instead
*/
// TODO make this private in 3.1
@Deprecated
public final static String[] CZECH_STOP_WORDS = {
"a","s","k","o","i","u","v","z","dnes","cz","t\u00edmto","bude\u0161","budem",
"byli","jse\u0161","m\u016fj","sv\u00fdm","ta","tomto","tohle","tuto","tyto",
@ -136,6 +137,7 @@ public final class CzechAnalyzer extends ReusableAnalyzerBase {
* @param stopwords a stopword set
* @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
*/
@Deprecated
public CzechAnalyzer(Version matchVersion, String... stopwords) {
this(matchVersion, StopFilter.makeStopSet( matchVersion, stopwords ));
}
@ -148,6 +150,7 @@ public final class CzechAnalyzer extends ReusableAnalyzerBase {
* @param stopwords a stopword set
* @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
*/
@Deprecated
public CzechAnalyzer(Version matchVersion, HashSet<?> stopwords) {
this(matchVersion, (Set<?>)stopwords);
}
@ -160,6 +163,7 @@ public final class CzechAnalyzer extends ReusableAnalyzerBase {
* @param stopwords a file containing stopwords
* @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
*/
@Deprecated
public CzechAnalyzer(Version matchVersion, File stopwords ) throws IOException {
this(matchVersion, (Set<?>)WordlistLoader.getWordSet( stopwords ));
}
@ -172,6 +176,7 @@ public final class CzechAnalyzer extends ReusableAnalyzerBase {
* and {@link #CzechAnalyzer(Version, Set)} instead
*/
// TODO extend StopwordAnalyzerBase once this method is gone!
@Deprecated
public void loadStopWords( InputStream wordfile, String encoding ) {
setPreviousTokenStream(null); // force a new stopfilter to be created
if ( wordfile == null ) {

View File

@ -60,6 +60,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
* @deprecated use {@link #getDefaultStopSet()} instead
*/
//TODO make this private in 3.1
@Deprecated
public final static String[] GERMAN_STOP_WORDS = {
"einer", "eine", "eines", "einem", "einen",
"der", "die", "das", "dass", "daß",
@ -137,6 +138,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #GermanAnalyzer(Version, Set)}
*/
@Deprecated
public GermanAnalyzer(Version matchVersion, String... stopwords) {
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
}
@ -145,6 +147,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #GermanAnalyzer(Version, Set)}
*/
@Deprecated
public GermanAnalyzer(Version matchVersion, Map<?,?> stopwords) {
this(matchVersion, stopwords.keySet());
@ -154,6 +157,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #GermanAnalyzer(Version, Set)}
*/
@Deprecated
public GermanAnalyzer(Version matchVersion, File stopwords) throws IOException {
this(matchVersion, WordlistLoader.getWordSet(stopwords));
}
@ -162,6 +166,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from an array of Strings.
* @deprecated use {@link #GermanAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(String[] exclusionlist) {
exclusionSet = StopFilter.makeStopSet(matchVersion, exclusionlist);
setPreviousTokenStream(null); // force a new stemmer to be created
@ -171,6 +176,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from a {@link Map}
* @deprecated use {@link #GermanAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(Map<?,?> exclusionlist) {
exclusionSet = new HashSet<Object>(exclusionlist.keySet());
setPreviousTokenStream(null); // force a new stemmer to be created
@ -180,6 +186,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from the words contained in the given file.
* @deprecated use {@link #GermanAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(File exclusionlist) throws IOException {
exclusionSet = WordlistLoader.getWordSet(exclusionlist);
setPreviousTokenStream(null); // force a new stemmer to be created

View File

@ -95,6 +95,7 @@ public final class GreekAnalyzer extends StopwordAnalyzerBase
* @param stopwords Array of stopwords to use.
* @deprecated use {@link #GreekAnalyzer(Version, Set)} instead
*/
@Deprecated
public GreekAnalyzer(Version matchVersion, String... stopwords)
{
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
@ -104,6 +105,7 @@ public final class GreekAnalyzer extends StopwordAnalyzerBase
* Builds an analyzer with the given stop words.
* @deprecated use {@link #GreekAnalyzer(Version, Set)} instead
*/
@Deprecated
public GreekAnalyzer(Version matchVersion, Map<?,?> stopwords)
{
this(matchVersion, stopwords.keySet());

View File

@ -111,6 +111,7 @@ public final class PersianAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #PersianAnalyzer(Version, Set)} instead
*/
@Deprecated
public PersianAnalyzer(Version matchVersion, String... stopwords) {
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
}
@ -119,6 +120,7 @@ public final class PersianAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #PersianAnalyzer(Version, Set)} instead
*/
@Deprecated
public PersianAnalyzer(Version matchVersion, Hashtable<?, ?> stopwords) {
this(matchVersion, stopwords.keySet());
}
@ -128,6 +130,7 @@ public final class PersianAnalyzer extends StopwordAnalyzerBase {
* using {@link #STOPWORDS_COMMENT}
* @deprecated use {@link #PersianAnalyzer(Version, Set)} instead
*/
@Deprecated
public PersianAnalyzer(Version matchVersion, File stopwords) throws IOException {
this(matchVersion, WordlistLoader.getWordSet(stopwords, STOPWORDS_COMMENT));
}

View File

@ -50,6 +50,7 @@ public final class ElisionFilter extends TokenFilter {
* @param articles a set of articles
* @deprecated use {@link #ElisionFilter(Version, TokenStream, Set)} instead
*/
@Deprecated
public void setArticles(Version matchVersion, Set<?> articles) {
this.articles = CharArraySet.unmodifiableSet(
CharArraySet.copy(matchVersion, articles));
@ -60,6 +61,7 @@ public final class ElisionFilter extends TokenFilter {
* @param articles a set of articles
* @deprecated use {@link #setArticles(Version, Set)} instead
*/
@Deprecated
public void setArticles(Set<?> articles) {
setArticles(Version.LUCENE_CURRENT, articles);
}
@ -74,6 +76,7 @@ public final class ElisionFilter extends TokenFilter {
* Constructs an elision filter with standard stop words
* @deprecated use {@link #ElisionFilter(Version, TokenStream)} instead
*/
@Deprecated
protected ElisionFilter(TokenStream input) {
this(Version.LUCENE_30, input);
}
@ -82,6 +85,7 @@ public final class ElisionFilter extends TokenFilter {
* Constructs an elision filter with a Set of stop words
* @deprecated use {@link #ElisionFilter(Version, TokenStream, Set)} instead
*/
@Deprecated
public ElisionFilter(TokenStream input, Set<?> articles) {
this(Version.LUCENE_30, input, articles);
}
@ -103,6 +107,7 @@ public final class ElisionFilter extends TokenFilter {
* Constructs an elision filter with an array of stop words
* @deprecated use {@link #ElisionFilter(Version, TokenStream, Set)} instead
*/
@Deprecated
public ElisionFilter(TokenStream input, String[] articles) {
this(Version.LUCENE_CURRENT, input,
new CharArraySet(Version.LUCENE_CURRENT,

View File

@ -68,6 +68,7 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
* @deprecated use {@link #getDefaultStopSet()} instead
*/
// TODO make this private in 3.1
@Deprecated
public final static String[] FRENCH_STOP_WORDS = {
"a", "afin", "ai", "ainsi", "après", "attendu", "au", "aujourd", "auquel", "aussi",
"autre", "autres", "aux", "auxquelles", "auxquels", "avait", "avant", "avec", "avoir",
@ -154,6 +155,7 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
* Builds an analyzer with the given stop words.
* @deprecated use {@link #FrenchAnalyzer(Version, Set)} instead
*/
@Deprecated
public FrenchAnalyzer(Version matchVersion, String... stopwords) {
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
}
@ -163,6 +165,7 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
* @throws IOException
* @deprecated use {@link #FrenchAnalyzer(Version, Set)} instead
*/
@Deprecated
public FrenchAnalyzer(Version matchVersion, File stopwords) throws IOException {
this(matchVersion, WordlistLoader.getWordSet(stopwords));
}
@ -171,6 +174,7 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from an array of Strings.
* @deprecated use {@link #FrenchAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(String... exclusionlist) {
excltable = StopFilter.makeStopSet(matchVersion, exclusionlist);
setPreviousTokenStream(null); // force a new stemmer to be created
@ -180,6 +184,7 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
* Builds an exclusionlist from a Map.
* @deprecated use {@link #FrenchAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(Map<?,?> exclusionlist) {
excltable = new HashSet<Object>(exclusionlist.keySet());
setPreviousTokenStream(null); // force a new stemmer to be created
@ -190,6 +195,7 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
* @throws IOException
* @deprecated use {@link #FrenchAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(File exclusionlist) throws IOException {
excltable = new HashSet<Object>(WordlistLoader.getWordSet(exclusionlist));
setPreviousTokenStream(null); // force a new stemmer to be created

View File

@ -57,6 +57,7 @@ public final class DutchAnalyzer extends Analyzer {
* List of typical Dutch stopwords.
* @deprecated use {@link #getDefaultStopSet()} instead
*/
@Deprecated
public final static String[] DUTCH_STOP_WORDS =
{
"de", "en", "van", "ik", "te", "dat", "die", "in", "een",
@ -128,6 +129,7 @@ public final class DutchAnalyzer extends Analyzer {
* @param stopwords
* @deprecated use {@link #DutchAnalyzer(Version, Set)} instead
*/
@Deprecated
public DutchAnalyzer(Version matchVersion, String... stopwords) {
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
}
@ -138,6 +140,7 @@ public final class DutchAnalyzer extends Analyzer {
* @param stopwords
* @deprecated use {@link #DutchAnalyzer(Version, Set)} instead
*/
@Deprecated
public DutchAnalyzer(Version matchVersion, HashSet<?> stopwords) {
this(matchVersion, (Set<?>)stopwords);
}
@ -148,6 +151,7 @@ public final class DutchAnalyzer extends Analyzer {
* @param stopwords
* @deprecated use {@link #DutchAnalyzer(Version, Set)} instead
*/
@Deprecated
public DutchAnalyzer(Version matchVersion, File stopwords) {
// this is completely broken!
try {
@ -165,6 +169,7 @@ public final class DutchAnalyzer extends Analyzer {
* @param exclusionlist
* @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(String... exclusionlist) {
excltable = StopFilter.makeStopSet(matchVersion, exclusionlist);
setPreviousTokenStream(null); // force a new stemmer to be created
@ -174,6 +179,7 @@ public final class DutchAnalyzer extends Analyzer {
* Builds an exclusionlist from a Hashtable.
* @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(HashSet<?> exclusionlist) {
excltable = exclusionlist;
setPreviousTokenStream(null); // force a new stemmer to be created
@ -183,6 +189,7 @@ public final class DutchAnalyzer extends Analyzer {
* Builds an exclusionlist from the words contained in the given file.
* @deprecated use {@link #DutchAnalyzer(Version, Set, Set)} instead
*/
@Deprecated
public void setStemExclusionTable(File exclusionlist) {
try {
excltable = org.apache.lucene.analysis.WordlistLoader.getWordSet(exclusionlist);

View File

@ -78,6 +78,7 @@ public final class ReverseStringFilter extends TokenFilter {
* @deprecated use {@link #ReverseStringFilter(Version, TokenStream)}
* instead. This constructor will be removed in Lucene 4.0
*/
@Deprecated
public ReverseStringFilter(TokenStream in) {
this(in, NOMARKER);
}
@ -95,6 +96,7 @@ public final class ReverseStringFilter extends TokenFilter {
* @deprecated use {@link #ReverseStringFilter(Version, TokenStream, char)}
* instead. This constructor will be removed in Lucene 4.0
*/
@Deprecated
public ReverseStringFilter(TokenStream in, char marker) {
this(Version.LUCENE_30, in, marker);
}
@ -157,6 +159,7 @@ public final class ReverseStringFilter extends TokenFilter {
* @deprecated use {@link #reverse(Version, String)} instead. This method
* will be removed in Lucene 4.0
*/
@Deprecated
public static String reverse( final String input ){
return reverse(Version.LUCENE_30, input);
}
@ -180,6 +183,7 @@ public final class ReverseStringFilter extends TokenFilter {
* @deprecated use {@link #reverse(Version, char[])} instead. This
* method will be removed in Lucene 4.0
*/
@Deprecated
public static void reverse( final char[] buffer ){
reverse( buffer, 0, buffer.length );
}
@ -202,6 +206,7 @@ public final class ReverseStringFilter extends TokenFilter {
* @deprecated use {@link #reverse(Version, char[], int)} instead. This
* method will be removed in Lucene 4.0
*/
@Deprecated
public static void reverse( final char[] buffer, final int len ){
reverse( buffer, 0, len );
}
@ -229,6 +234,7 @@ public final class ReverseStringFilter extends TokenFilter {
* @deprecated use {@link #reverse(Version, char[], int, int)} instead. This
* method will be removed in Lucene 4.0
*/
@Deprecated
public static void reverse(char[] buffer, int start, int len ) {
reverseUnicode3(buffer, start, len);
}
@ -236,6 +242,7 @@ public final class ReverseStringFilter extends TokenFilter {
/**
* @deprecated Remove this when support for 3.0 indexes is no longer needed.
*/
@Deprecated
private static void reverseUnicode3( char[] buffer, int start, int len ){
if( len <= 1 ) return;
int num = len>>1;
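The static reverse(...) helpers delegate to Version.LUCENE_30 and are deprecated in favour of the Version-taking overloads; reverseUnicode3 is kept only for old indexes. A small illustrative sketch of the replacement call:

import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.apache.lucene.util.Version;

public class ReverseMigration {
  public static void main(String[] args) {
    // Before (now @Deprecated): no Version argument
    // String old = ReverseStringFilter.reverse("lucene");

    // After, per the @deprecated javadoc
    String reversed = ReverseStringFilter.reverse(Version.LUCENE_30, "lucene");
    System.out.println(reversed); // "enecul"
  }
}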

View File

@ -72,6 +72,7 @@ public final class RussianAnalyzer extends StopwordAnalyzerBase
* Builds an analyzer with the given stop words.
* @deprecated use {@link #RussianAnalyzer(Version, Set)} instead
*/
@Deprecated
public RussianAnalyzer(Version matchVersion, String... stopwords) {
this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
}
@ -93,6 +94,7 @@ public final class RussianAnalyzer extends StopwordAnalyzerBase
* TODO: create a Set version of this ctor
* @deprecated use {@link #RussianAnalyzer(Version, Set)} instead
*/
@Deprecated
public RussianAnalyzer(Version matchVersion, Map<?,?> stopwords)
{
this(matchVersion, stopwords.keySet());

View File

@ -29,6 +29,7 @@ import org.apache.lucene.analysis.tokenattributes.TermAttribute;
* @deprecated Use {@link LowerCaseFilter} instead, which has the same
* functionality. This filter will be removed in Lucene 4.0
*/
@Deprecated
public final class RussianLowerCaseFilter extends TokenFilter
{
private TermAttribute termAtt;

View File

@ -40,6 +40,7 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
/**
* @deprecated Remove this test when support for 3.0 indexes is no longer needed.
*/
@Deprecated
public void testStopWordLegacy() throws Exception {
assertAnalyzesTo(new CzechAnalyzer(Version.LUCENE_30), "Pokud mluvime o volnem",
new String[] { "mluvime", "volnem" });
@ -53,6 +54,7 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
/**
* @deprecated Remove this test when support for 3.0 indexes is no longer needed.
*/
@Deprecated
public void testReusableTokenStreamLegacy() throws Exception {
Analyzer analyzer = new CzechAnalyzer(Version.LUCENE_30);
assertAnalyzesToReuse(analyzer, "Pokud mluvime o volnem", new String[] { "mluvime", "volnem" });
@ -69,6 +71,7 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
* An input stream that always throws IOException for testing.
* @deprecated Remove this class when the loadStopWords method is removed.
*/
@Deprecated
private class UnreliableInputStream extends InputStream {
@Override
public int read() throws IOException {
@ -82,6 +85,7 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
* this would cause a NPE when it is time to create the StopFilter.
* @deprecated Remove this test when the loadStopWords method is removed.
*/
@Deprecated
public void testInvalidStopWordFile() throws Exception {
CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_30);
cz.loadStopWords(new UnreliableInputStream(), "UTF-8");
@ -94,6 +98,7 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
* when using reusable token streams.
* @deprecated Remove this test when the loadStopWords method is removed.
*/
@Deprecated
public void testStopWordFileReuse() throws Exception {
CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_30);
assertAnalyzesToReuse(cz, "Česká Republika",

View File

@ -41,6 +41,7 @@ public class TextFragment
* @deprecated Use {@link #TextFragment(CharSequence, int, int)} instead.
* This constructor will be removed in Lucene 4.0
*/
@Deprecated
public TextFragment(StringBuffer markedUpText,int textStartPos, int fragNum)
{
this.markedUpText=markedUpText;

View File

@ -156,6 +156,7 @@ public class ChainedFilter extends Filter
* switch to a different DocIdSet implementation yourself.
* This method will be removed in Lucene 4.0
**/
@Deprecated
protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
return result;
}

View File

@ -117,6 +117,7 @@ public class BooleanFilter extends Filter
* switch to a different DocIdSet implementation yourself.
* This method will be removed in Lucene 4.0
*/
@Deprecated
protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
return result;
}

View File

@ -36,6 +36,7 @@ import org.apache.lucene.search.Query;
* used along the transition from the old query parser to the new
* one
*/
@Deprecated
public class MultiFieldQueryParserWrapper extends QueryParserWrapper {
/**

View File

@ -64,6 +64,7 @@ import org.apache.lucene.search.Query;
* used along the transition from the old query parser to the new
* one
*/
@Deprecated
public class QueryParserWrapper {
/**

View File

@ -55,6 +55,7 @@ public final class SnowballAnalyzer extends Analyzer {
* Builds the named analyzer with the given stop words.
* @deprecated Use {@link #SnowballAnalyzer(Version, String, Set)} instead.
*/
@Deprecated
public SnowballAnalyzer(Version matchVersion, String name, String[] stopWords) {
this(matchVersion, name);
stopSet = StopFilter.makeStopSet(matchVersion, stopWords);
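Same array-to-Set move for the Snowball wrapper. A short sketch (stemmer name and stop words are illustrative):

import java.util.Set;

import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.snowball.SnowballAnalyzer;
import org.apache.lucene.util.Version;

public class SnowballMigration {
  public static void main(String[] args) {
    // Before (now @Deprecated): String[] stop words
    // SnowballAnalyzer old =
    //     new SnowballAnalyzer(Version.LUCENE_30, "English", new String[] { "the", "a" });

    // After, per the @deprecated javadoc: a Set of stop words
    Set<?> stopWords = StopFilter.makeStopSet(Version.LUCENE_30, "the", "a");
    SnowballAnalyzer analyzer = new SnowballAnalyzer(Version.LUCENE_30, "English", stopWords);
    System.out.println(analyzer.getClass().getSimpleName());
  }
}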

View File

@ -27,6 +27,7 @@ package org.apache.lucene.spatial.geometry.shape;
* @deprecated This has been replaced with more accurate
* math in {@link LLRect}. This class will be removed in a future release.
*/
@Deprecated
public class DistanceApproximation
{
private double m_testLat;

View File

@ -43,6 +43,7 @@ public class HTMLParser implements HTMLParserConstants {
/**
* @deprecated Use HTMLParser(FileInputStream) instead
*/
@Deprecated
public HTMLParser(File file) throws FileNotFoundException {
this(new FileInputStream(file));
}

View File

@ -70,6 +70,7 @@ public class HTMLParser {
/**
* @deprecated Use HTMLParser(FileInputStream) instead
*/
@Deprecated
public HTMLParser(File file) throws FileNotFoundException {
this(new FileInputStream(file));
}

View File

@ -85,11 +85,13 @@ public abstract class Analyzer implements Closeable {
}
/** @deprecated */
@Deprecated
protected boolean overridesTokenStreamMethod = false;
/** @deprecated This is only present to preserve
* back-compat of classes that subclass a core analyzer
* and override tokenStream but not reusableTokenStream */
@Deprecated
protected void setOverridesTokenStreamMethod(Class<? extends Analyzer> baseClass) {
try {
Method m = this.getClass().getMethod("tokenStream", String.class, Reader.class);

View File

@ -117,6 +117,7 @@ public class CharArraySet extends AbstractSet<Object> {
* otherwise <code>true</code>.
* @deprecated use {@link #CharArraySet(Version, int, boolean)} instead
*/
@Deprecated
public CharArraySet(int startSize, boolean ignoreCase) {
this(Version.LUCENE_30, startSize, ignoreCase);
}
@ -131,6 +132,7 @@ public class CharArraySet extends AbstractSet<Object> {
* otherwise <code>true</code>.
* @deprecated use {@link #CharArraySet(Version, Collection, boolean)} instead
*/
@Deprecated
public CharArraySet(Collection<? extends Object> c, boolean ignoreCase) {
this(Version.LUCENE_30, c.size(), ignoreCase);
addAll(c);
@ -381,6 +383,7 @@ public class CharArraySet extends AbstractSet<Object> {
* preserved.
* @deprecated use {@link #copy(Version, Set)} instead.
*/
@Deprecated
public static CharArraySet copy(final Set<?> set) {
if(set == EMPTY_SET)
return EMPTY_SET;
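CharArraySet likewise gains Version-aware constructors and copy(Version, Set). A small sketch of the new constructor (size and entries are arbitrary):

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.util.Version;

public class CharArraySetMigration {
  public static void main(String[] args) {
    // Before (now @Deprecated): no Version, LUCENE_30 behaviour assumed
    // CharArraySet old = new CharArraySet(16, true);

    // After, per the @deprecated javadoc
    CharArraySet set = new CharArraySet(Version.LUCENE_30, 16, true);
    set.add("Foo");
    System.out.println(set.contains("foo")); // true, because ignoreCase == true
  }
}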

View File

@ -31,6 +31,7 @@ import org.apache.lucene.analysis.tokenattributes.TermAttribute;
* This class is included for use with existing
* indexes and will be removed in a future release (possibly Lucene 4.0).
*/
@Deprecated
public final class ISOLatin1AccentFilter extends TokenFilter {
public ISOLatin1AccentFilter(TokenStream input) {
super(input);

View File

@ -50,6 +50,7 @@ public final class LowerCaseFilter extends TokenFilter {
/**
* @deprecated Use {@link #LowerCaseFilter(Version, TokenStream)} instead.
*/
@Deprecated
public LowerCaseFilter(TokenStream in) {
this(Version.LUCENE_30, in);
}

View File

@ -64,6 +64,7 @@ public final class StopFilter extends TokenFilter {
* @param ignoreCase if true, all words are lower cased first
* @deprecated use {@link #StopFilter(Version, TokenStream, Set, boolean)} instead
*/
@Deprecated
public StopFilter(boolean enablePositionIncrements, TokenStream input, Set<?> stopWords, boolean ignoreCase)
{
this(Version.LUCENE_30, enablePositionIncrements, input, stopWords, ignoreCase);
@ -117,6 +118,7 @@ public final class StopFilter extends TokenFilter {
* @see #makeStopSet(Version, java.lang.String[])
* @deprecated use {@link #StopFilter(Version, TokenStream, Set)} instead
*/
@Deprecated
public StopFilter(boolean enablePositionIncrements, TokenStream in, Set<?> stopWords) {
this(Version.LUCENE_CURRENT, enablePositionIncrements, in, stopWords, false);
}
@ -148,6 +150,7 @@ public final class StopFilter extends TokenFilter {
* @see #makeStopSet(Version, java.lang.String[], boolean) passing false to ignoreCase
* @deprecated use {@link #makeStopSet(Version, String...)} instead
*/
@Deprecated
public static final Set<Object> makeStopSet(String... stopWords) {
return makeStopSet(Version.LUCENE_30, stopWords, false);
}
@ -176,6 +179,7 @@ public final class StopFilter extends TokenFilter {
* @see #makeStopSet(Version, java.lang.String[], boolean) passing false to ignoreCase
* @deprecated use {@link #makeStopSet(Version, List)} instead
*/
@Deprecated
public static final Set<Object> makeStopSet(List<?> stopWords) {
return makeStopSet(Version.LUCENE_30, stopWords, false);
}
@ -202,6 +206,7 @@ public final class StopFilter extends TokenFilter {
* @return a Set containing the words
* @deprecated use {@link #makeStopSet(Version, String[], boolean)} instead;
*/
@Deprecated
public static final Set<Object> makeStopSet(String[] stopWords, boolean ignoreCase) {
return makeStopSet(Version.LUCENE_30, stopWords, ignoreCase);
}
@ -226,6 +231,7 @@ public final class StopFilter extends TokenFilter {
* @return A Set ({@link CharArraySet}) containing the words
* @deprecated use {@link #makeStopSet(Version, List, boolean)} instead
*/
@Deprecated
public static final Set<Object> makeStopSet(List<?> stopWords, boolean ignoreCase){
return makeStopSet(Version.LUCENE_30, stopWords, ignoreCase);
}
@ -271,6 +277,7 @@ public final class StopFilter extends TokenFilter {
* or later, it returns true.
* @deprecated use {@link #StopFilter(Version, TokenStream, Set)} instead
*/
@Deprecated
public static boolean getEnablePositionIncrementsVersionDefault(Version matchVersion) {
return matchVersion.onOrAfter(Version.LUCENE_29);
}
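StopFilter collects several of these changes: the enablePositionIncrements constructors and the Version-less makeStopSet overloads are deprecated in favour of Version-taking variants, with the position-increment default derived from matchVersion (true on or after 2.9, per the last hunk). A placeholder sketch of the replacement usage:

import java.io.StringReader;
import java.util.Set;

import org.apache.lucene.analysis.KeywordTokenizer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.util.Version;

public class StopFilterMigration {
  public static void main(String[] args) {
    TokenStream source = new KeywordTokenizer(new StringReader("the quick fox"));

    // Before (now @Deprecated): Version-less factory plus explicit boolean flags
    // Set<Object> oldWords = StopFilter.makeStopSet("the", "a");
    // TokenStream oldFilter = new StopFilter(true, source, oldWords, false);

    // After, per the @deprecated javadocs: the Version drives the defaults
    Set<?> stopWords = StopFilter.makeStopSet(Version.LUCENE_30, "the", "a");
    TokenStream filter = new StopFilter(Version.LUCENE_30, source, stopWords);
    System.out.println(filter.getClass().getSimpleName());
  }
}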

View File

@ -71,6 +71,7 @@ public final class StandardTokenizer extends Tokenizer {
* @deprecated this solves a bug where HOSTs that end with '.' are identified
* as ACRONYMs.
*/
@Deprecated
public static final int ACRONYM_DEP = 8;
/** String token types that correspond to token type int constants */
@ -227,6 +228,7 @@ public final class StandardTokenizer extends Tokenizer {
*
* @deprecated Remove in 3.X and make true the only valid value
*/
@Deprecated
public boolean isReplaceInvalidAcronym() {
return replaceInvalidAcronym;
}
@ -238,6 +240,7 @@ public final class StandardTokenizer extends Tokenizer {
*
* See https://issues.apache.org/jira/browse/LUCENE-1068
*/
@Deprecated
public void setReplaceInvalidAcronym(boolean replaceInvalidAcronym) {
this.replaceInvalidAcronym = replaceInvalidAcronym;
}

View File

@ -354,6 +354,7 @@ public static final int CJ = StandardTokenizer.CJ;
* @deprecated this solves a bug where HOSTs that end with '.' are identified
* as ACRONYMs.
*/
@Deprecated
public static final int ACRONYM_DEP = StandardTokenizer.ACRONYM_DEP;
public static final String [] TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;

View File

@ -55,6 +55,7 @@ public static final int CJ = StandardTokenizer.CJ;
* @deprecated this solves a bug where HOSTs that end with '.' are identified
* as ACRONYMs.
*/
@Deprecated
public static final int ACRONYM_DEP = StandardTokenizer.ACRONYM_DEP;
public static final String [] TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;

View File

@ -57,6 +57,7 @@ import java.util.Calendar; // for javadoc
* This class is included for use with existing
* indices and will be removed in a future release (possibly Lucene 4.0).
*/
@Deprecated
public class DateField {
private DateField() {}

View File

@ -44,6 +44,7 @@ import org.apache.lucene.util.NumericUtils; // for javadocs
* This class is included for use with existing
* indices and will be removed in a future release (possibly Lucene 4.0).
*/
@Deprecated
public class NumberTools {
private static final int RADIX = 36;

View File

@ -412,6 +412,7 @@ final class FieldsReader implements Cloneable {
private int toRead;
private long pointer;
/** @deprecated Only kept for backward-compatibility with <3.0 indexes. Will be removed in 4.0. */
@Deprecated
private boolean isCompressed;
public LazyField(String name, Field.Store store, int toRead, long pointer, boolean isBinary, boolean isCompressed) {

View File

@ -32,6 +32,7 @@ final class FieldsWriter
static final byte FIELD_IS_BINARY = 0x2;
/** @deprecated Kept for backwards-compatibility with <3.0 indexes; will be removed in 4.0 */
@Deprecated
static final byte FIELD_IS_COMPRESSED = 0x4;
// Original format

View File

@ -1276,6 +1276,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
* We do it with R/W access for the tests (BW compatibility)
* @deprecated Remove this when tests are fixed!
*/
@Deprecated
static SegmentReader getOnlySegmentReader(Directory dir) throws IOException {
return getOnlySegmentReader(IndexReader.open(dir,false));
}

View File

@ -373,6 +373,7 @@ public abstract class FSDirectory extends Directory {
}
/** @deprecated Use {@link #getDirectory} instead. */
@Deprecated
public File getFile() {
return getDirectory();
}

View File

@ -146,6 +146,7 @@ public abstract class IndexInput implements Cloneable,Closeable {
* instead, and construct the string
* from those utf8 bytes
*/
@Deprecated
public void readChars(char[] buffer, int start, int length)
throws IOException {
final int end = start + length;
@ -174,6 +175,7 @@ public abstract class IndexInput implements Cloneable,Closeable {
* @deprecated this method operates on old "modified utf8" encoded
* strings
*/
@Deprecated
public void skipChars(int length) throws IOException{
for (int i = 0; i < length; i++) {
byte b = readByte();

View File

@ -114,6 +114,7 @@ public abstract class IndexOutput implements Closeable {
* @deprecated -- please pre-convert to utf8 bytes
* instead or use {@link #writeString}
*/
@Deprecated
public void writeChars(String s, int start, int length)
throws IOException {
final int end = start + length;
@ -139,6 +140,7 @@ public abstract class IndexOutput implements Closeable {
* @param length the number of characters in the sequence
* @deprecated -- please pre-convert to utf8 bytes instead or use {@link #writeString}
*/
@Deprecated
public void writeChars(char[] s, int start, int length)
throws IOException {
final int end = start + length;
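The char-oriented store methods (readChars, skipChars, writeChars) are deprecated in favour of the UTF-8 string methods. A throwaway sketch writing via writeString on a RAMDirectory (file name is made up):

import java.io.IOException;

import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class WriteStringMigration {
  public static void main(String[] args) throws IOException {
    RAMDirectory dir = new RAMDirectory();
    IndexOutput out = dir.createOutput("demo.bin");

    // Before (now @Deprecated): modified-UTF-8 style char writing
    // out.writeChars("hello", 0, 5);

    // After, per the @deprecated javadoc: write the string as standard UTF-8
    out.writeString("hello");
    out.close();
    dir.close();
  }
}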

View File

@ -39,6 +39,7 @@ public class NoLockFactory extends LockFactory {
* @see #getNoLockFactory()
*/
// make private in 4.0!
@Deprecated
public NoLockFactory() {}
public static NoLockFactory getNoLockFactory() {

View File

@ -27,6 +27,7 @@ import java.util.Map;
* A serializable Enum class.
* @deprecated Use Java 5 enum, will be removed in a later Lucene 3.x release.
*/
@Deprecated
@SuppressWarnings("serial")
public abstract class Parameter implements Serializable
{

View File

@ -28,6 +28,7 @@ import java.util.Map;
* @deprecated Lucene's internal use of this class has now
* switched to {@link DoubleBarrelLRUCache}.
*/
@Deprecated
public class SimpleLRUCache<K,V> extends SimpleMapCache<K,V> {
private final static float LOADFACTOR = 0.75f;

View File

@ -29,6 +29,7 @@ import java.util.Set;
* @deprecated Lucene's internal use of this class has now
* switched to {@link DoubleBarrelLRUCache}.
*/
@Deprecated
public class SimpleMapCache<K,V> extends Cache<K,V> {
protected Map<K,V> map;

View File

@ -154,6 +154,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
* @deprecated remove this when lucene 3.0 "broken unicode 4" support
* is no longer needed.
*/
@Deprecated
private static class LowerCaseWhitespaceAnalyzerBWComp extends Analyzer {
@Override
@ -215,6 +216,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
* @deprecated remove this test when lucene 3.0 "broken unicode 4" support
* is no longer needed.
*/
@Deprecated
public void testLowerCaseFilterBWComp() throws IOException {
Analyzer a = new LowerCaseWhitespaceAnalyzerBWComp();
// BMP

View File

@ -254,6 +254,7 @@ public class TestCharArraySet extends LuceneTestCase {
* @deprecated remove this test when lucene 3.0 "broken unicode 4" support is
* no longer needed.
*/
@Deprecated
public void testSupplementaryCharsBWCompat() {
String missing = "Term %s is missing in the set";
String falsePos = "Term %s is in the set but shouldn't";
@ -285,6 +286,7 @@ public class TestCharArraySet extends LuceneTestCase {
* @deprecated remove this test when lucene 3.0 "broken unicode 4" support is
* no longer needed.
*/
@Deprecated
public void testSingleHighSurrogateBWComapt() {
String missing = "Term %s is missing in the set";
String falsePos = "Term %s is in the set but shouldn't";

View File

@ -456,6 +456,7 @@ public class CheckHits {
* @deprecated
* @see CheckHits#EXPLAIN_SCORE_TOLERANCE_DELTA
*/
@Deprecated
public static float SCORE_TOLERANCE_DELTA = 0.00005f;
Query q;

View File

@ -84,6 +84,7 @@ public class _TestUtil {
/** Use only for testing.
* @deprecated -- in 3.0 we can use Arrays.toString
* instead */
@Deprecated
public static String arrayToString(int[] array) {
StringBuilder buf = new StringBuilder();
buf.append("[");
@ -100,6 +101,7 @@ public class _TestUtil {
/** Use only for testing.
* @deprecated -- in 3.0 we can use Arrays.toString
* instead */
@Deprecated
public static String arrayToString(Object[] array) {
StringBuilder buf = new StringBuilder();
buf.append("[");

View File

@ -18,6 +18,7 @@ package org.apache.lucene.util.cache;
*/
/** @deprecated */
@Deprecated
public class TestSimpleLRUCache extends BaseTestLRU {
public void testLRUCache() throws Exception {
final int n = 100;