mirror of https://github.com/apache/lucene.git
LUCENE-2167: cut over these analyzers also
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1002221 13f79535-47bb-0310-9956-ffa450edef68
parent dfed16f2fd
commit f5031a6b27

@@ -119,7 +119,7 @@ public final class CatalanAnalyzer extends StopwordAnalyzerBase {
   protected TokenStreamComponents createComponents(String fieldName,
       Reader reader) {
     final Tokenizer source = new StandardTokenizer(matchVersion, reader);
-    TokenStream result = new StandardFilter(source);
+    TokenStream result = new StandardFilter(matchVersion, source);
     result = new LowerCaseFilter(matchVersion, result);
     result = new StopFilter(matchVersion, result, stopwords);
     if(!stemExclusionSet.isEmpty())

@@ -119,7 +119,7 @@ public final class BasqueAnalyzer extends StopwordAnalyzerBase {
   protected TokenStreamComponents createComponents(String fieldName,
       Reader reader) {
     final Tokenizer source = new StandardTokenizer(matchVersion, reader);
-    TokenStream result = new StandardFilter(source);
+    TokenStream result = new StandardFilter(matchVersion, source);
     result = new LowerCaseFilter(matchVersion, result);
     result = new StopFilter(matchVersion, result, stopwords);
     if(!stemExclusionSet.isEmpty())

@@ -119,7 +119,7 @@ public final class ArmenianAnalyzer extends StopwordAnalyzerBase {
   protected TokenStreamComponents createComponents(String fieldName,
       Reader reader) {
     final Tokenizer source = new StandardTokenizer(matchVersion, reader);
-    TokenStream result = new StandardFilter(source);
+    TokenStream result = new StandardFilter(matchVersion, source);
     result = new LowerCaseFilter(matchVersion, result);
     result = new StopFilter(matchVersion, result, stopwords);
     if(!stemExclusionSet.isEmpty())

@@ -31,6 +31,8 @@ import org.apache.lucene.util.Version;
 public class StandardFilter extends TokenFilter {
   private final Version matchVersion;
 
+  /** @deprecated Use {@link #StandardFilter(Version, TokenStream)} instead. */
+  @Deprecated
   public StandardFilter(TokenStream in) {
     this(Version.LUCENE_30, in);
   }

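The one-argument constructor is now marked deprecated and simply delegates to the Version-aware constructor named in its javadoc. That two-argument constructor lies outside the hunk shown above; as a rough sketch (an assumption, not part of this diff), it presumably just stores the version and wraps the input stream:

  // Assumed shape of the Version-aware constructor referenced above;
  // it is not part of the displayed hunk.
  public StandardFilter(Version matchVersion, TokenStream in) {
    super(in);
    this.matchVersion = matchVersion;
  }
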
@@ -143,7 +143,7 @@ public final class PolishAnalyzer extends StopwordAnalyzerBase {
   protected TokenStreamComponents createComponents(String fieldName,
       Reader reader) {
     final Tokenizer source = new StandardTokenizer(matchVersion, reader);
-    TokenStream result = new StandardFilter(source);
+    TokenStream result = new StandardFilter(matchVersion, source);
     result = new LowerCaseFilter(matchVersion, result);
     result = new StopFilter(matchVersion, result, stopwords);
     if(!stemExclusionSet.isEmpty())

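Taken together, these hunks mean the matchVersion an application hands to one of the cut-over analyzers now reaches StandardFilter as well as StandardTokenizer, LowerCaseFilter, and StopFilter. A minimal usage sketch, not part of the commit (the CutoverSketch class, the field name, and Version.LUCENE_31 are illustrative choices only):

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ca.CatalanAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class CutoverSketch {
  public static void main(String[] args) throws IOException {
    // The version passed here is forwarded to every stage built in
    // createComponents, including the StandardFilter changed by this commit.
    CatalanAnalyzer analyzer = new CatalanAnalyzer(Version.LUCENE_31);
    TokenStream ts = analyzer.tokenStream("body", new StringReader("una prova"));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString());
    }
    ts.end();
    ts.close();
  }
}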