Handle TokenizerFactory TODOs (#32063)

* Don't replace TokenizerFactory with Supplier; this approach was rejected in #32063
* Remove unused parameter from constructor
This commit is contained in:
Armin Braun 2018-07-17 14:14:02 +02:00 committed by GitHub
parent a7e477126f
commit ed3b44fb4c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 22 additions and 23 deletions

View File

@ -39,7 +39,7 @@ public class CharGroupTokenizerFactory extends AbstractTokenizerFactory{
private boolean tokenizeOnSymbol = false;
public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
for (final String c : settings.getAsList("tokenize_on_chars")) {
if (c == null || c.length() == 0) {

View File

@ -35,7 +35,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;
ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -36,7 +36,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
private final CharMatcher matcher;
EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
this.matcher = parseTokenChars(settings.getAsList("token_chars"));

View File

@ -31,7 +31,7 @@ public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
private final int bufferSize;
KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
bufferSize = settings.getAsInt("buffer_size", 256);
}

View File

@ -29,7 +29,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
public class LetterTokenizerFactory extends AbstractTokenizerFactory {
LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
}
@Override

View File

@ -30,7 +30,7 @@ import org.elasticsearch.index.analysis.MultiTermAwareComponent;
public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent {
LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
}
@Override

View File

@ -85,7 +85,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
}
NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);

View File

@ -37,7 +37,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
private final boolean reverse;
PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
bufferSize = settings.getAsInt("buffer_size", 1024);
String delimiter = settings.get("delimiter");
if (delimiter == null) {

View File

@ -35,7 +35,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory {
private final int group;
PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
if (sPattern == null) {

View File

@ -31,7 +31,7 @@ public class SimplePatternSplitTokenizerFactory extends AbstractTokenizerFactory
private final String pattern;
public SimplePatternSplitTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
pattern = settings.get("pattern", "");
}

View File

@ -31,7 +31,7 @@ public class SimplePatternTokenizerFactory extends AbstractTokenizerFactory {
private final String pattern;
public SimplePatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
pattern = settings.get("pattern", "");
}

View File

@ -32,7 +32,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
public class ThaiTokenizerFactory extends AbstractTokenizerFactory {
ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
}
@Override

View File

@ -32,7 +32,7 @@ public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;
UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -34,7 +34,7 @@ public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory {
private Integer maxTokenLength;
WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -47,7 +47,7 @@ public class IcuTokenizerFactory extends AbstractTokenizerFactory {
private static final String RULE_FILES = "rule_files";
public IcuTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
config = getIcuConfig(environment, settings);
}

View File

@ -45,7 +45,7 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory {
private boolean discartPunctuation;
public KuromojiTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
mode = getMode(settings);
userDictionary = getUserDictionary(env, settings);
discartPunctuation = settings.getAsBoolean("discard_punctuation", true);

View File

@ -38,7 +38,7 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory {
private final KoreanTokenizer.DecompoundMode decompoundMode;
public NoriTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
decompoundMode = getMode(settings);
userDictionary = getUserDictionary(env, settings);
}

View File

@ -28,7 +28,7 @@ import org.elasticsearch.index.IndexSettings;
public class SmartChineseTokenizerTokenizerFactory extends AbstractTokenizerFactory {
public SmartChineseTokenizerTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
}
@Override

View File

@ -27,8 +27,7 @@ import org.elasticsearch.index.IndexSettings;
public abstract class AbstractTokenizerFactory extends AbstractIndexComponent implements TokenizerFactory {
protected final Version version;
// TODO drop `String ignored` in a followup
public AbstractTokenizerFactory(IndexSettings indexSettings, String ignored, Settings settings) {
public AbstractTokenizerFactory(IndexSettings indexSettings, Settings settings) {
super(indexSettings);
this.version = Analysis.parseAnalysisVersion(this.indexSettings.getSettings(), settings, logger);
}

View File

@ -31,7 +31,7 @@ public class StandardTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;
public StandardTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -21,6 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
public interface TokenizerFactory { // TODO replace with Supplier<Tokenizer>
public interface TokenizerFactory {
Tokenizer create();
}

View File

@ -20,7 +20,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
public class MlClassicTokenizerFactory extends AbstractTokenizerFactory {
public MlClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
super(indexSettings, settings);
}
@Override