SOLR-367: specific return types from create in Token*Factories

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@591158 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Chris M. Hostetter 2007-11-01 22:37:42 +00:00
parent 61a6b8db77
commit e1ad6e3ef7
27 changed files with 31 additions and 27 deletions

View File

@ -214,7 +214,11 @@ Other Changes
init(Map<String,String> args)
Existing classes should continue to work, but it is encouraged to update
the initialization code. (Henri Biestro via ryan)
5. SOLR-367: The create method in all TokenFilter and Tokenizer Factories
provided by Solr now declare their specific return types instead of just
using "TokenStream" (hossman)
================== Release 1.2, 20070602 ==================

View File

@ -153,7 +153,7 @@ public class CapitalizationFilterFactory extends BaseTokenFilterFactory
return word.toString();
}
public TokenStream create(TokenStream input) {
public CapitalizationFilter create(TokenStream input) {
return new CapitalizationFilter(input,this);
}
}

View File

@ -49,7 +49,7 @@ public class EdgeNGramFilterFactory extends BaseTokenFilterFactory {
}
}
public TokenStream create(TokenStream input) {
public EdgeNGramTokenFilter create(TokenStream input) {
return new EdgeNGramTokenFilter(input, side, minGramSize, maxGramSize);
}
}

View File

@ -48,7 +48,7 @@ public class EdgeNGramTokenizerFactory extends BaseTokenizerFactory {
}
}
public TokenStream create(Reader input) {
public EdgeNGramTokenizer create(Reader input) {
return new EdgeNGramTokenizer(input, side, minGramSize, maxGramSize);
}
}

View File

@ -48,7 +48,7 @@ public class EnglishPorterFilterFactory extends BaseTokenFilterFactory {
private Set protectedWords = null;
public TokenStream create(TokenStream input) {
public EnglishPorterFilter create(TokenStream input) {
return new EnglishPorterFilter(input,protectedWords);
}
}

View File

@ -24,7 +24,7 @@ import org.apache.solr.analysis.BaseTokenFilterFactory;
* Factory for HyphenatedWordsFilter
*/
public class HyphenatedWordsFilterFactory extends BaseTokenFilterFactory {
public TokenStream create(TokenStream input) {
public HyphenatedWordsFilter create(TokenStream input) {
return new HyphenatedWordsFilter(input);
}
}

View File

@ -24,7 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
* $Id$
*/
public class ISOLatin1AccentFilterFactory extends BaseTokenFilterFactory {
public TokenStream create(TokenStream input) {
public ISOLatin1AccentFilter create(TokenStream input) {
return new ISOLatin1AccentFilter(input);
}
}

View File

@ -66,7 +66,7 @@ public class KeepWordFilterFactory extends BaseTokenFilterFactory {
this.ignoreCase = ignoreCase;
}
public TokenStream create(TokenStream input) {
public KeepWordFilter create(TokenStream input) {
return new KeepWordFilter(input,words,ignoreCase);
}
}

View File

@ -26,7 +26,7 @@ import java.io.Reader;
* @version $Id: LowerCaseTokenizerFactory.java 382610 2006-03-03 01:43:03Z yonik $
*/
public class KeywordTokenizerFactory extends BaseTokenizerFactory {
public TokenStream create(Reader input) {
public KeywordTokenizer create(Reader input) {
return new KeywordTokenizer(input);
}
}

View File

@ -33,7 +33,7 @@ public class LengthFilterFactory extends BaseTokenFilterFactory {
min=Integer.parseInt(args.get("min"));
max=Integer.parseInt(args.get("max"));
}
public TokenStream create(TokenStream input) {
public LengthFilter create(TokenStream input) {
return new LengthFilter(input,min,max);
}
}

View File

@ -26,7 +26,7 @@ import java.io.Reader;
* @version $Id$
*/
public class LetterTokenizerFactory extends BaseTokenizerFactory {
public TokenStream create(Reader input) {
public LetterTokenizer create(Reader input) {
return new LetterTokenizer(input);
}
}

View File

@ -24,7 +24,7 @@ import org.apache.lucene.analysis.LowerCaseFilter;
* @version $Id$
*/
public class LowerCaseFilterFactory extends BaseTokenFilterFactory {
public TokenStream create(TokenStream input) {
public LowerCaseFilter create(TokenStream input) {
return new LowerCaseFilter(input);
}
}

View File

@ -26,7 +26,7 @@ import java.io.Reader;
* @version $Id$
*/
public class LowerCaseTokenizerFactory extends BaseTokenizerFactory {
public TokenStream create(Reader input) {
public LowerCaseTokenizer create(Reader input) {
return new LowerCaseTokenizer(input);
}
}

View File

@ -43,7 +43,7 @@ public class NGramFilterFactory extends BaseTokenFilterFactory {
: NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
}
public TokenStream create(TokenStream input) {
public NGramTokenFilter create(TokenStream input) {
return new NGramTokenFilter(input, minGramSize, maxGramSize);
}
}

View File

@ -42,7 +42,7 @@ public class NGramTokenizerFactory extends BaseTokenizerFactory {
}
/** Creates the {@link TokenStream} of n-grams from the given {@link Reader}. */
public TokenStream create(Reader input) {
public NGramTokenizer create(Reader input) {
return new NGramTokenizer(input, minGramSize, maxGramSize);
}
}

View File

@ -60,7 +60,7 @@ public class PatternReplaceFilterFactory extends BaseTokenFilterFactory {
}
}
public TokenStream create(TokenStream input) {
public PatternReplaceFilter create(TokenStream input) {
return new PatternReplaceFilter(input, p, replacement, all);
}
}

View File

@ -86,7 +86,7 @@ public class PhoneticFilterFactory extends BaseTokenFilterFactory
}
}
public TokenStream create(TokenStream input) {
public PhoneticFilter create(TokenStream input) {
return new PhoneticFilter(input,encoder,name,inject);
}
}

View File

@ -24,7 +24,7 @@ import org.apache.lucene.analysis.PorterStemFilter;
* @version $Id$
*/
public class PorterStemFilterFactory extends BaseTokenFilterFactory {
public TokenStream create(TokenStream input) {
public PorterStemFilter create(TokenStream input) {
return new PorterStemFilter(input);
}
}

View File

@ -23,7 +23,7 @@ import org.apache.lucene.analysis.TokenStream;
* @version $Id:$
*/
public class RemoveDuplicatesTokenFilterFactory extends BaseTokenFilterFactory {
public TokenStream create(TokenStream input) {
public RemoveDuplicatesTokenFilter create(TokenStream input) {
return new RemoveDuplicatesTokenFilter(input);
}
}

View File

@ -42,7 +42,7 @@ public class SnowballPorterFilterFactory extends BaseTokenFilterFactory {
SolrCore.log.fine("SnowballPorterFilterFactory: language=" + language);
}
public TokenStream create(TokenStream input) {
public SnowballFilter create(TokenStream input) {
return new SnowballFilter(input,language);
}
}

View File

@ -24,7 +24,7 @@ import org.apache.lucene.analysis.standard.StandardFilter;
* @version $Id$
*/
public class StandardFilterFactory extends BaseTokenFilterFactory {
public TokenStream create(TokenStream input) {
public StandardFilter create(TokenStream input) {
return new StandardFilter(input);
}
}

View File

@ -27,7 +27,7 @@ import java.io.Reader;
*/
public class StandardTokenizerFactory extends BaseTokenizerFactory {
public TokenStream create(Reader input) {
public StandardTokenizer create(Reader input) {
return new StandardTokenizer(input);
}
}

View File

@ -51,7 +51,7 @@ public class StopFilterFactory extends BaseTokenFilterFactory {
private Set stopWords = StopFilter.makeStopSet(StopAnalyzer.ENGLISH_STOP_WORDS);
private boolean ignoreCase;
public TokenStream create(TokenStream input) {
public StopFilter create(TokenStream input) {
return new StopFilter(input,stopWords,ignoreCase);
}
}

View File

@ -113,7 +113,7 @@ public class SynonymFilterFactory extends BaseTokenFilterFactory {
}
public TokenStream create(TokenStream input) {
public SynonymFilter create(TokenStream input) {
return new SynonymFilter(input,synMap,ignoreCase);
}

View File

@ -46,7 +46,7 @@ public class TrimFilterFactory extends BaseTokenFilterFactory {
}
}
public TokenStream create(TokenStream input) {
public TrimFilter create(TokenStream input) {
return new TrimFilter(input, updateOffsets);
}
}

View File

@ -26,7 +26,7 @@ import java.io.Reader;
* @version $Id$
*/
public class WhitespaceTokenizerFactory extends BaseTokenizerFactory {
public TokenStream create(Reader input) {
public WhitespaceTokenizer create(Reader input) {
return new WhitespaceTokenizer(input);
}
}

View File

@ -42,7 +42,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory {
splitOnCaseChange = getInt("splitOnCaseChange", 1);
}
public TokenStream create(TokenStream input) {
public WordDelimiterFilter create(TokenStream input) {
return new WordDelimiterFilter(input,
generateWordParts, generateNumberParts,
catenateWords, catenateNumbers, catenateAll,