Simplify Analysis registration and configuration

This change moves all analysis component registration to the node level
and removes the significant API overhead previously required to register a
token filter, tokenizer, char filter, or analyzer. Registration no longer goes
through Guice: real factories are passed as functional interfaces instead of
class objects that are instantiated at runtime.

This change also entirely hides the internal analyzer caching previously done
in the IndicesAnalysisService and decouples all analysis registration and
creation from dependency injection.
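
The shape of the change, as a rough before/after sketch (the old call is taken
from the deleted AnalysisModule below, the new one from the AnalysisRegistry
added by this commit; the surrounding setup is illustrative):

    // before: a class object handed to the Guice-based AnalysisModule,
    // instantiated reflectively through assisted injection at runtime
    analysisModule.addTokenFilter("asciifolding", ASCIIFoldingTokenFilterFactory.class);

    // after: a plain constructor reference registered on the node-level registry;
    // it matches AnalysisModule.AnalysisProvider#get(IndexSettings, Environment, String, Settings)
    tokenFilters.put("asciifolding", ASCIIFoldingTokenFilterFactory::new);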
Simon Willnauer 2015-10-28 22:19:44 +01:00
parent 7f179cdab0
commit aa38d053d7
205 changed files with 1693 additions and 2813 deletions

View File: org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java

@@ -24,32 +24,24 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CharFilterFactoryFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactoryFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.analysis.TokenizerFactoryFactory;
import org.elasticsearch.index.analysis.*;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -63,17 +55,15 @@ import java.util.List;
public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRequest, AnalyzeResponse> {
private final IndicesService indicesService;
private final IndicesAnalysisService indicesAnalysisService;
private static final Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
private final Environment environment;
@Inject
public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, IndicesAnalysisService indicesAnalysisService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
IndicesService indicesService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Environment environment) {
super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, AnalyzeRequest::new, ThreadPool.Names.INDEX);
this.indicesService = indicesService;
this.indicesAnalysisService = indicesAnalysisService;
this.environment = environment;
}
@Override
@@ -105,53 +95,69 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
@Override
protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) {
IndexService indexService = null;
if (shardId != null) {
indexService = indicesService.indexServiceSafe(shardId.getIndex());
}
Analyzer analyzer = null;
boolean closeAnalyzer = false;
String field = null;
if (request.field() != null) {
if (indexService == null) {
throw new IllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
try {
final IndexService indexService;
if (shardId != null) {
indexService = indicesService.indexServiceSafe(shardId.getIndex());
} else {
indexService = null;
}
MappedFieldType fieldType = indexService.mapperService().smartNameFieldType(request.field());
if (fieldType != null) {
if (fieldType.isNumeric()) {
throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
String field = null;
Analyzer analyzer = null;
if (request.field() != null) {
if (indexService == null) {
throw new IllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
}
MappedFieldType fieldType = indexService.mapperService().smartNameFieldType(request.field());
if (fieldType != null) {
if (fieldType.isNumeric()) {
throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
}
analyzer = fieldType.indexAnalyzer();
field = fieldType.names().indexName();
}
analyzer = fieldType.indexAnalyzer();
field = fieldType.names().indexName();
}
}
if (field == null) {
if (indexService != null) {
field = indexService.queryParserService().defaultField();
} else {
field = AllFieldMapper.NAME;
if (field == null) {
if (indexService != null) {
field = indexService.queryParserService().defaultField();
} else {
field = AllFieldMapper.NAME;
}
}
final AnalysisRegistry analysisRegistry = indicesService.getAnalysis();
return analyze(request, field, analyzer, indexService != null ? indexService.analysisService() : null, analysisRegistry, environment);
} catch (IOException e) {
throw new ElasticsearchException("analysis failed", e);
}
}
public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, AnalysisService analysisService, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
boolean closeAnalyzer = false;
if (analyzer == null && request.analyzer() != null) {
if (indexService == null) {
analyzer = indicesAnalysisService.analyzer(request.analyzer());
if (analysisService == null) {
analyzer = analysisRegistry.getAnalyzer(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find global analyzer [" + request.analyzer() + "]");
}
} else {
analyzer = indexService.analysisService().analyzer(request.analyzer());
}
if (analyzer == null) {
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
analyzer = analysisService.analyzer(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
}
}
} else if (request.tokenizer() != null) {
TokenizerFactory tokenizerFactory;
if (indexService == null) {
TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(request.tokenizer());
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(request.tokenizer());
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
}
tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(), DEFAULT_SETTINGS);
tokenizerFactory = tokenizerFactoryFactory.get(environment, request.tokenizer());
} else {
tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer());
tokenizerFactory = analysisService.tokenizer(request.tokenizer());
if (tokenizerFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
}
@@ -162,14 +168,14 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().length];
for (int i = 0; i < request.tokenFilters().length; i++) {
String tokenFilterName = request.tokenFilters()[i];
if (indexService == null) {
TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName);
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilterName);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName, DEFAULT_SETTINGS);
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilterName);
} else {
tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName);
tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilterName);
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
@@ -185,20 +191,20 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
charFilterFactories = new CharFilterFactory[request.charFilters().length];
for (int i = 0; i < request.charFilters().length; i++) {
String charFilterName = request.charFilters()[i];
if (indexService == null) {
CharFilterFactoryFactory charFilterFactoryFactory = indicesAnalysisService.charFilterFactoryFactory(charFilterName);
if (analysisService == null) {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilterName);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.create(charFilterName, DEFAULT_SETTINGS);
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilterName);
} else {
charFilterFactories[i] = indexService.analysisService().charFilter(charFilterName);
charFilterFactories[i] = analysisService.charFilter(charFilterName);
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
}
@@ -206,10 +212,10 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
closeAnalyzer = true;
} else if (analyzer == null) {
if (indexService == null) {
analyzer = indicesAnalysisService.analyzer("standard");
if (analysisService == null) {
analyzer = analysisRegistry.getAnalyzer("standard");
} else {
analyzer = indexService.analysisService().defaultIndexAnalyzer();
analyzer = analysisService.defaultIndexAnalyzer();
}
}
if (analyzer == null) {

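One consequence of the hunks above: shardOperation now delegates to a static
analyze(...) method that can run with no index at all, resolving named
components from the node-level AnalysisRegistry rather than the removed
IndicesAnalysisService. A hedged usage sketch (the request construction is
illustrative and not part of this commit):

    // given a node-level AnalysisRegistry and Environment
    AnalyzeRequest request = new AnalyzeRequest().text("quick brown fox").tokenizer("standard");
    AnalyzeResponse response = TransportAnalyzeAction.analyze(
            request, AllFieldMapper.NAME, null, null /* no per-index AnalysisService */,
            analysisRegistry, environment);
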
View File: org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java

@@ -257,7 +257,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
};
public FakeAnalysisService(IndexSettings indexSettings) {
super(indexSettings);
super(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
}
@Override

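The tiny hunk above tracks the new AnalysisService constructor, which now
receives the four component maps directly instead of having them injected; the
upgrade service's fake simply passes empty maps. For reference, the
constructor shape as used at the end of AnalysisRegistry#build further below
(local variable names paraphrased):

    // argument order as in the new AnalysisRegistry#build
    new AnalysisService(indexSettings, analyzerProviders,
            tokenizerFactories, charFilterFactories, tokenFilterFactories);
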
View File: org/elasticsearch/index/IndexModule.java

@@ -20,8 +20,11 @@
package org.elasticsearch.index;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
@@ -41,6 +44,7 @@ import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import java.io.IOException;
import java.util.*;
import java.util.function.BiFunction;
import java.util.function.Consumer;
@@ -69,6 +73,7 @@ public final class IndexModule extends AbstractModule {
private final IndexSettings indexSettings;
private final IndexStoreConfig indexStoreConfig;
private final IndicesQueryCache indicesQueryCache;
private final AnalysisRegistry analysisRegistry;
// pkg private so tests can mock
Class<? extends EngineFactory> engineFactoryImpl = InternalEngineFactory.class;
private SetOnce<IndexSearcherWrapperFactory> indexSearcherWrapper = new SetOnce<>();
@@ -81,11 +86,12 @@ public final class IndexModule extends AbstractModule {
private IndicesWarmer indicesWarmer;
public IndexModule(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig, IndicesQueryCache indicesQueryCache, IndicesWarmer warmer) {
public IndexModule(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig, IndicesQueryCache indicesQueryCache, IndicesWarmer warmer, AnalysisRegistry analysisRegistry) {
this.indexStoreConfig = indexStoreConfig;
this.indexSettings = indexSettings;
this.indicesQueryCache = indicesQueryCache;
this.indicesWarmer = warmer;
this.analysisRegistry = analysisRegistry;
registerQueryCache(INDEX_QUERY_CACHE, IndexQueryCache::new);
registerQueryCache(NONE_QUERY_CACHE, (a, b) -> new NoneQueryCache(a));
}
@@ -216,6 +222,11 @@ public final class IndexModule extends AbstractModule {
@Override
protected void configure() {
try {
bind(AnalysisService.class).toInstance(analysisRegistry.build(indexSettings));
} catch (IOException e) {
throw new ElasticsearchException("can't create analysis service", e);
}
bind(EngineFactory.class).to(engineFactoryImpl).asEagerSingleton();
bind(IndexSearcherWrapperFactory.class).toInstance(indexSearcherWrapper.get() == null ? (shard) -> null : indexSearcherWrapper.get());
bind(IndexEventListener.class).toInstance(freeze());

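The configure() hunk above is now the single remaining Guice touchpoint for
analysis: the per-index AnalysisService is built eagerly from the node-level
registry and only the finished instance is bound, so creation itself no longer
involves injection:

    // per-index service, fully constructed outside of dependency injection
    AnalysisService analysisService = analysisRegistry.build(indexSettings);
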
View File: org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java

@@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@@ -32,8 +31,7 @@ import org.elasticsearch.index.IndexSettings;
public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory {
private final boolean preserveOriginal;
@Inject
public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
preserveOriginal = settings.getAsBoolean("preserve_original", false);
}

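With @Inject and @Assisted gone, a factory constructor such as the one above
has exactly the shape of AnalysisModule.AnalysisProvider#get, so the
constructor reference itself is the whole registration (a sketch restating
what the registry does internally):

    // the four-argument constructor doubles as the provider implementation
    AnalysisModule.AnalysisProvider<TokenFilterFactory> provider =
            ASCIIFoldingTokenFilterFactory::new;
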
View File: org/elasticsearch/index/analysis/AnalysisModule.java (deleted)

@@ -1,507 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Scopes;
import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Objects;
/**
*
*/
public class AnalysisModule extends AbstractModule {
public static class AnalysisBinderProcessor {
public void processCharFilters(CharFiltersBindings charFiltersBindings) {
}
public static class CharFiltersBindings {
private final Map<String, Class<? extends CharFilterFactory>> charFilters = new HashMap<>();
public CharFiltersBindings() {
}
public void processCharFilter(String name, Class<? extends CharFilterFactory> charFilterFactory) {
charFilters.put(name, charFilterFactory);
}
}
public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
}
public static class TokenFiltersBindings {
private final Map<String, Class<? extends TokenFilterFactory>> tokenFilters = new HashMap<>();
public TokenFiltersBindings() {
}
public void processTokenFilter(String name, Class<? extends TokenFilterFactory> tokenFilterFactory) {
tokenFilters.put(name, tokenFilterFactory);
}
}
public void processTokenizers(TokenizersBindings tokenizersBindings) {
}
public static class TokenizersBindings {
private final Map<String, Class<? extends TokenizerFactory>> tokenizers = new HashMap<>();
public TokenizersBindings() {
}
public void processTokenizer(String name, Class<? extends TokenizerFactory> tokenizerFactory) {
tokenizers.put(name, tokenizerFactory);
}
}
public void processAnalyzers(AnalyzersBindings analyzersBindings) {
}
public static class AnalyzersBindings {
private final Map<String, Class<? extends AnalyzerProvider>> analyzers = new HashMap<>();
public AnalyzersBindings() {
}
public void processAnalyzer(String name, Class<? extends AnalyzerProvider> analyzerProvider) {
analyzers.put(name, analyzerProvider);
}
}
}
private final Settings settings;
private final IndicesAnalysisService indicesAnalysisService;
private final LinkedList<AnalysisBinderProcessor> processors = new LinkedList<>();
private final Map<String, Class<? extends CharFilterFactory>> charFilters = new HashMap<>();
private final Map<String, Class<? extends TokenFilterFactory>> tokenFilters = new HashMap<>();
private final Map<String, Class<? extends TokenizerFactory>> tokenizers = new HashMap<>();
private final Map<String, Class<? extends AnalyzerProvider>> analyzers = new HashMap<>();
public AnalysisModule(Settings settings, IndicesAnalysisService indicesAnalysisService) {
Objects.requireNonNull(indicesAnalysisService);
this.settings = settings;
this.indicesAnalysisService = indicesAnalysisService;
processors.add(new DefaultProcessor());
try {
processors.add(new ExtendedProcessor());
} catch (Throwable t) {
// ignore. no extended ones
}
}
public AnalysisModule addProcessor(AnalysisBinderProcessor processor) {
processors.addFirst(processor);
return this;
}
public AnalysisModule addCharFilter(String name, Class<? extends CharFilterFactory> charFilter) {
charFilters.put(name, charFilter);
return this;
}
public AnalysisModule addTokenFilter(String name, Class<? extends TokenFilterFactory> tokenFilter) {
tokenFilters.put(name, tokenFilter);
return this;
}
public AnalysisModule addTokenizer(String name, Class<? extends TokenizerFactory> tokenizer) {
tokenizers.put(name, tokenizer);
return this;
}
public AnalysisModule addAnalyzer(String name, Class<? extends AnalyzerProvider> analyzer) {
analyzers.put(name, analyzer);
return this;
}
@Override
protected void configure() {
MapBinder<String, CharFilterFactoryFactory> charFilterBinder
= MapBinder.newMapBinder(binder(), String.class, CharFilterFactoryFactory.class);
// CHAR FILTERS
AnalysisBinderProcessor.CharFiltersBindings charFiltersBindings = new AnalysisBinderProcessor.CharFiltersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processCharFilters(charFiltersBindings);
}
charFiltersBindings.charFilters.putAll(charFilters);
Map<String, Settings> charFiltersSettings = settings.getGroups("index.analysis.char_filter");
for (Map.Entry<String, Settings> entry : charFiltersSettings.entrySet()) {
String charFilterName = entry.getKey();
Settings charFilterSettings = entry.getValue();
String typeName = charFilterSettings.get("type");
if (typeName == null) {
throw new IllegalArgumentException("CharFilter [" + charFilterName + "] must have a type associated with it");
}
Class<? extends CharFilterFactory> type = charFiltersBindings.charFilters.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown CharFilter type [" + typeName + "] for [" + charFilterName + "]");
}
charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the char filters in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends CharFilterFactory>> entry : charFiltersBindings.charFilters.entrySet()) {
String charFilterName = entry.getKey();
Class<? extends CharFilterFactory> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (charFiltersSettings.containsKey(charFilterName)) {
continue;
}
// check, if it requires settings, then don't register it, we know default has no settings...
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasCharFilter(charFilterName) == false) {
charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
// TOKEN FILTERS
MapBinder<String, TokenFilterFactoryFactory> tokenFilterBinder
= MapBinder.newMapBinder(binder(), String.class, TokenFilterFactoryFactory.class);
// initial default bindings
AnalysisBinderProcessor.TokenFiltersBindings tokenFiltersBindings = new AnalysisBinderProcessor.TokenFiltersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processTokenFilters(tokenFiltersBindings);
}
tokenFiltersBindings.tokenFilters.putAll(tokenFilters);
Map<String, Settings> tokenFiltersSettings = settings.getGroups("index.analysis.filter");
for (Map.Entry<String, Settings> entry : tokenFiltersSettings.entrySet()) {
String tokenFilterName = entry.getKey();
Settings tokenFilterSettings = entry.getValue();
String typeName = tokenFilterSettings.get("type");
if (typeName == null) {
throw new IllegalArgumentException("TokenFilter [" + tokenFilterName + "] must have a type associated with it");
}
Class<? extends TokenFilterFactory> type = tokenFiltersBindings.tokenFilters.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown TokenFilter type [" + typeName + "] for [" + tokenFilterName + "]");
}
tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the filters in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends TokenFilterFactory>> entry : tokenFiltersBindings.tokenFilters.entrySet()) {
String tokenFilterName = entry.getKey();
Class<? extends TokenFilterFactory> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (tokenFiltersSettings.containsKey(tokenFilterName)) {
continue;
}
// check, if it requires settings, then don't register it, we know default has no settings...
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasTokenFilter(tokenFilterName) == false) {
tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
// TOKENIZER
MapBinder<String, TokenizerFactoryFactory> tokenizerBinder
= MapBinder.newMapBinder(binder(), String.class, TokenizerFactoryFactory.class);
// initial default bindings
AnalysisBinderProcessor.TokenizersBindings tokenizersBindings = new AnalysisBinderProcessor.TokenizersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processTokenizers(tokenizersBindings);
}
tokenizersBindings.tokenizers.putAll(tokenizers);
Map<String, Settings> tokenizersSettings = settings.getGroups("index.analysis.tokenizer");
for (Map.Entry<String, Settings> entry : tokenizersSettings.entrySet()) {
String tokenizerName = entry.getKey();
Settings tokenizerSettings = entry.getValue();
String typeName = tokenizerSettings.get("type");
if (typeName == null) {
throw new IllegalArgumentException("Tokenizer [" + tokenizerName + "] must have a type associated with it");
}
Class<? extends TokenizerFactory> type = tokenizersBindings.tokenizers.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown Tokenizer type [" + typeName + "] for [" + tokenizerName + "]");
}
tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the tokenizers in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends TokenizerFactory>> entry : tokenizersBindings.tokenizers.entrySet()) {
String tokenizerName = entry.getKey();
Class<? extends TokenizerFactory> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (tokenizersSettings.containsKey(tokenizerName)) {
continue;
}
// check, if it requires settings, then don't register it, we know default has no settings...
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasTokenizer(tokenizerName) == false) {
tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
// ANALYZER
MapBinder<String, AnalyzerProviderFactory> analyzerBinder
= MapBinder.newMapBinder(binder(), String.class, AnalyzerProviderFactory.class);
// initial default bindings
AnalysisBinderProcessor.AnalyzersBindings analyzersBindings = new AnalysisBinderProcessor.AnalyzersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processAnalyzers(analyzersBindings);
}
analyzersBindings.analyzers.putAll(analyzers);
Map<String, Settings> analyzersSettings = settings.getGroups("index.analysis.analyzer");
for (Map.Entry<String, Settings> entry : analyzersSettings.entrySet()) {
String analyzerName = entry.getKey();
Settings analyzerSettings = entry.getValue();
String typeName = analyzerSettings.get("type");
Class<? extends AnalyzerProvider> type;
if (typeName == null) {
if (analyzerSettings.get("tokenizer") != null) {
// custom analyzer, need to add it
type = CustomAnalyzerProvider.class;
} else {
throw new IllegalArgumentException("Analyzer [" + analyzerName + "] must have a type associated with it");
}
} else if (typeName.equals("custom")) {
type = CustomAnalyzerProvider.class;
} else {
type = analyzersBindings.analyzers.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown Analyzer type [" + typeName + "] for [" + analyzerName + "]");
}
}
analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the analyzers in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends AnalyzerProvider>> entry : analyzersBindings.analyzers.entrySet()) {
String analyzerName = entry.getKey();
Class<? extends AnalyzerProvider> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (analyzersSettings.containsKey(analyzerName)) {
continue;
}
// check, if it requires settings, then don't register it, we know default has no settings...
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasAnalyzer(analyzerName) == false) {
analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
bind(AnalysisService.class).in(Scopes.SINGLETON);
}
private static class DefaultProcessor extends AnalysisBinderProcessor {
@Override
public void processCharFilters(CharFiltersBindings charFiltersBindings) {
charFiltersBindings.processCharFilter("html_strip", HtmlStripCharFilterFactory.class);
charFiltersBindings.processCharFilter("pattern_replace", PatternReplaceCharFilterFactory.class);
}
@Override
public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
tokenFiltersBindings.processTokenFilter("stop", StopTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("reverse", ReverseTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("asciifolding", ASCIIFoldingTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("length", LengthTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("lowercase", LowerCaseTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("uppercase", UpperCaseTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("porter_stem", PorterStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("kstem", KStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("standard", StandardTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("nGram", NGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("ngram", NGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("edgeNGram", EdgeNGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("edge_ngram", EdgeNGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("shingle", ShingleTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("unique", UniqueTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("truncate", TruncateTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("trim", TrimTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("limit", LimitTokenCountFilterFactory.class);
tokenFiltersBindings.processTokenFilter("common_grams", CommonGramsTokenFilterFactory.class);
}
@Override
public void processTokenizers(TokenizersBindings tokenizersBindings) {
tokenizersBindings.processTokenizer("standard", StandardTokenizerFactory.class);
tokenizersBindings.processTokenizer("uax_url_email", UAX29URLEmailTokenizerFactory.class);
tokenizersBindings.processTokenizer("path_hierarchy", PathHierarchyTokenizerFactory.class);
tokenizersBindings.processTokenizer("keyword", KeywordTokenizerFactory.class);
tokenizersBindings.processTokenizer("letter", LetterTokenizerFactory.class);
tokenizersBindings.processTokenizer("lowercase", LowerCaseTokenizerFactory.class);
tokenizersBindings.processTokenizer("whitespace", WhitespaceTokenizerFactory.class);
tokenizersBindings.processTokenizer("nGram", NGramTokenizerFactory.class);
tokenizersBindings.processTokenizer("ngram", NGramTokenizerFactory.class);
tokenizersBindings.processTokenizer("edgeNGram", EdgeNGramTokenizerFactory.class);
tokenizersBindings.processTokenizer("edge_ngram", EdgeNGramTokenizerFactory.class);
}
@Override
public void processAnalyzers(AnalyzersBindings analyzersBindings) {
analyzersBindings.processAnalyzer("default", StandardAnalyzerProvider.class);
analyzersBindings.processAnalyzer("standard", StandardAnalyzerProvider.class);
analyzersBindings.processAnalyzer("standard_html_strip", StandardHtmlStripAnalyzerProvider.class);
analyzersBindings.processAnalyzer("simple", SimpleAnalyzerProvider.class);
analyzersBindings.processAnalyzer("stop", StopAnalyzerProvider.class);
analyzersBindings.processAnalyzer("whitespace", WhitespaceAnalyzerProvider.class);
analyzersBindings.processAnalyzer("keyword", KeywordAnalyzerProvider.class);
}
}
private static class ExtendedProcessor extends AnalysisBinderProcessor {
@Override
public void processCharFilters(CharFiltersBindings charFiltersBindings) {
charFiltersBindings.processCharFilter("mapping", MappingCharFilterFactory.class);
}
@Override
public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
tokenFiltersBindings.processTokenFilter("snowball", SnowballTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("stemmer", StemmerTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("word_delimiter", WordDelimiterTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("delimited_payload_filter", DelimitedPayloadTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("synonym", SynonymTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("elision", ElisionTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("keep", KeepWordFilterFactory.class);
tokenFiltersBindings.processTokenFilter("keep_types", KeepTypesFilterFactory.class);
tokenFiltersBindings.processTokenFilter("pattern_capture", PatternCaptureGroupTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("pattern_replace", PatternReplaceTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("dictionary_decompounder", DictionaryCompoundWordTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("hyphenation_decompounder", HyphenationCompoundWordTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("arabic_stem", ArabicStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("brazilian_stem", BrazilianStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("czech_stem", CzechStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("dutch_stem", DutchStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("french_stem", FrenchStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("german_stem", GermanStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("russian_stem", RussianStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("keyword_marker", KeywordMarkerTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("stemmer_override", StemmerOverrideTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("arabic_normalization", ArabicNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("german_normalization", GermanNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("hindi_normalization", HindiNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("indic_normalization", IndicNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("sorani_normalization", SoraniNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("persian_normalization", PersianNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("scandinavian_normalization", ScandinavianNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("scandinavian_folding", ScandinavianFoldingFilterFactory.class);
tokenFiltersBindings.processTokenFilter("serbian_normalization", SerbianNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("hunspell", HunspellTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("cjk_bigram", CJKBigramFilterFactory.class);
tokenFiltersBindings.processTokenFilter("cjk_width", CJKWidthFilterFactory.class);
tokenFiltersBindings.processTokenFilter("apostrophe", ApostropheFilterFactory.class);
tokenFiltersBindings.processTokenFilter("classic", ClassicFilterFactory.class);
tokenFiltersBindings.processTokenFilter("decimal_digit", DecimalDigitFilterFactory.class);
}
@Override
public void processTokenizers(TokenizersBindings tokenizersBindings) {
tokenizersBindings.processTokenizer("pattern", PatternTokenizerFactory.class);
tokenizersBindings.processTokenizer("classic", ClassicTokenizerFactory.class);
tokenizersBindings.processTokenizer("thai", ThaiTokenizerFactory.class);
}
@Override
public void processAnalyzers(AnalyzersBindings analyzersBindings) {
analyzersBindings.processAnalyzer("pattern", PatternAnalyzerProvider.class);
analyzersBindings.processAnalyzer("snowball", SnowballAnalyzerProvider.class);
analyzersBindings.processAnalyzer("arabic", ArabicAnalyzerProvider.class);
analyzersBindings.processAnalyzer("armenian", ArmenianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("basque", BasqueAnalyzerProvider.class);
analyzersBindings.processAnalyzer("brazilian", BrazilianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("bulgarian", BulgarianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("catalan", CatalanAnalyzerProvider.class);
analyzersBindings.processAnalyzer("chinese", ChineseAnalyzerProvider.class);
analyzersBindings.processAnalyzer("cjk", CjkAnalyzerProvider.class);
analyzersBindings.processAnalyzer("czech", CzechAnalyzerProvider.class);
analyzersBindings.processAnalyzer("danish", DanishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("dutch", DutchAnalyzerProvider.class);
analyzersBindings.processAnalyzer("english", EnglishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("finnish", FinnishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("french", FrenchAnalyzerProvider.class);
analyzersBindings.processAnalyzer("galician", GalicianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("german", GermanAnalyzerProvider.class);
analyzersBindings.processAnalyzer("greek", GreekAnalyzerProvider.class);
analyzersBindings.processAnalyzer("hindi", HindiAnalyzerProvider.class);
analyzersBindings.processAnalyzer("hungarian", HungarianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("indonesian", IndonesianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("irish", IrishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("italian", ItalianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("latvian", LatvianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("lithuanian", LithuanianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("norwegian", NorwegianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("persian", PersianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("portuguese", PortugueseAnalyzerProvider.class);
analyzersBindings.processAnalyzer("romanian", RomanianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("russian", RussianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("sorani", SoraniAnalyzerProvider.class);
analyzersBindings.processAnalyzer("spanish", SpanishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("swedish", SwedishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("turkish", TurkishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("thai", ThaiAnalyzerProvider.class);
}
}
}

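Everything in the deleted file above is the Guice wiring this commit retires:
for each component type a MapBinder plus FactoryProvider turned a registered
Class into a factory at injection time, along the lines of

    charFilterBinder.addBinding(charFilterName)
            .toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, clazz))
            .in(Scopes.SINGLETON);

repeated, with the same settings-driven and AnalysisSettingsRequired special
cases, for char filters, token filters, tokenizers, and analyzers. Its
replacement, AnalysisRegistry in the next file, covers the same cases with
plain maps of AnalysisProvider functions.
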
View File: org/elasticsearch/index/analysis/AnalysisRegistry.java (new)

@@ -0,0 +1,461 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
import org.elasticsearch.indices.analysis.*;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
/**
* An internal registry for tokenizer, token filter, char filter and analyzer.
* This class exists per node and allows to create per-index {@link AnalysisService} via {@link #build(IndexSettings)}
*/
public final class AnalysisRegistry implements Closeable {
private final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilters;
private final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters;
private final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizers;
private final Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers;
private final Map<String, Analyzer> cachedAnalyzer = new ConcurrentHashMap<>();
private final PrebuiltAnalysis prebuiltAnalysis;
private final HunspellService hunspellService;
private final Environment environemnt;
public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
this(hunspellService, environment, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
}
public AnalysisRegistry(HunspellService hunspellService, Environment environment,
Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilters,
Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters,
Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizers,
Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers) {
prebuiltAnalysis = new PrebuiltAnalysis();
this.hunspellService = hunspellService;
this.environemnt = environment;
final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilterBuilder = new HashMap<>(charFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilterBuilder = new HashMap<>(tokenFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizerBuilder = new HashMap<>(tokenizers);
final Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzerBuilder= new HashMap<>(analyzers);
registerBuiltInAnalyzer(analyzerBuilder);
registerBuiltInCharFilter(charFilterBuilder);
registerBuiltInTokenizer(tokenizerBuilder);
registerBuiltInTokenFilters(tokenFilterBuilder);
this.tokenFilters = Collections.unmodifiableMap(tokenFilterBuilder);
this.tokenizers = Collections.unmodifiableMap(tokenizerBuilder);
this.charFilters = Collections.unmodifiableMap(charFilterBuilder);
this.analyzers = Collections.unmodifiableMap(analyzerBuilder);
}
/**
* Returns a registered {@link TokenizerFactory} provider by name or <code>null</code> if the tokenizer was not registered
*/
public AnalysisModule.AnalysisProvider<TokenizerFactory> getTokenizerProvider(String tokenizer) {
return tokenizers.getOrDefault(tokenizer, this.prebuiltAnalysis.getTokenizerFactory(tokenizer));
}
/**
* Returns a registered {@link TokenFilterFactory} provider by name or <code>null</code> if the token filter was not registered
*/
public AnalysisModule.AnalysisProvider<TokenFilterFactory> getTokenFilterProvider(String tokenFilter) {
return tokenFilters.getOrDefault(tokenFilter, this.prebuiltAnalysis.getTokenFilterFactory(tokenFilter));
}
/**
* Returns a registered {@link CharFilterFactory} provider by name or <code>null</code> if the char filter was not registered
*/
public AnalysisModule.AnalysisProvider<CharFilterFactory> getCharFilterProvider(String charFilter) {
return charFilters.getOrDefault(charFilter, this.prebuiltAnalysis.getCharFilterFactory(charFilter));
}
/**
* Returns a registered {@link Analyzer} provider by name or <code>null</code> if the analyzer was not registered
*/
public Analyzer getAnalyzer(String analyzer) throws IOException {
AnalysisModule.AnalysisProvider<AnalyzerProvider> analyzerProvider = this.prebuiltAnalysis.getAnalyzerProvider(analyzer);
if (analyzerProvider == null) {
AnalysisModule.AnalysisProvider<AnalyzerProvider> provider = analyzers.get(analyzer);
return provider == null ? null : cachedAnalyzer.computeIfAbsent(analyzer, (key) -> {
try {
return provider.get(environemnt, key).get();
} catch (IOException ex) {
throw new ElasticsearchException("failed to load analyzer for name " + key, ex);
}}
);
}
return analyzerProvider.get(environemnt, analyzer).get();
}
@Override
public void close() throws IOException {
try {
prebuiltAnalysis.close();
} finally {
IOUtils.close(cachedAnalyzer.values());
}
}
/**
* Creates an index-level {@link AnalysisService} from this registry using the given index settings
*/
public AnalysisService build(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> charFiltersSettings = indexSettings.getSettings().getGroups("index.analysis.char_filter");
final Map<String, Settings> tokenFiltersSettings = indexSettings.getSettings().getGroups("index.analysis.filter");
final Map<String, Settings> tokenizersSettings = indexSettings.getSettings().getGroups("index.analysis.tokenizer");
final Map<String, Settings> analyzersSettings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
final Map<String, CharFilterFactory> charFilterFactories = buildMapping(false, "charfilter", indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
final Map<String, TokenizerFactory> tokenizerFactories = buildMapping(false, "tokenizer", indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters = new HashMap<>(this.tokenFilters);
/*
* synonym is different than everything else since it needs access to the tokenizer factories for this index.
* instead of building the infrastructure for plugins we rather make it a real exception to not pollute the general interface and
* hide internal data-structures as much as possible.
*/
tokenFilters.put("synonym", requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, tokenizerFactories, name, settings)));
final Map<String, TokenFilterFactory> tokenFilterFactories = buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
final Map<String, AnalyzerProvider> analyzierFactories = buildMapping(true, "analyzer", indexSettings, analyzersSettings, analyzers, prebuiltAnalysis.analyzerProviderFactories);
return new AnalysisService(indexSettings, analyzierFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
}
private static <T> AnalysisModule.AnalysisProvider<T> requriesAnalysisSettings(AnalysisModule.AnalysisProvider<T> provider) {
return new AnalysisModule.AnalysisProvider<T>() {
@Override
public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
return provider.get(indexSettings, environment, name, settings);
}
@Override
public boolean requiresAnalysisSettings() {
return true;
}
};
}
private void registerBuiltInCharFilter(Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilters) {
charFilters.put("html_strip", HtmlStripCharFilterFactory::new);
charFilters.put("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new));
charFilters.put("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new));
}
private void registerBuiltInTokenizer(Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizers) {
tokenizers.put("standard", StandardTokenizerFactory::new);
tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new);
tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new);
tokenizers.put("keyword", KeywordTokenizerFactory::new);
tokenizers.put("letter", LetterTokenizerFactory::new);
tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
tokenizers.put("whitespace", WhitespaceTokenizerFactory::new);
tokenizers.put("nGram", NGramTokenizerFactory::new);
tokenizers.put("ngram", NGramTokenizerFactory::new);
tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new);
tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new);
tokenizers.put("pattern", PatternTokenizerFactory::new);
tokenizers.put("classic", ClassicTokenizerFactory::new);
tokenizers.put("thai", ThaiTokenizerFactory::new);
}
private void registerBuiltInTokenFilters(Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters) {
tokenFilters.put("stop", StopTokenFilterFactory::new);
tokenFilters.put("reverse", ReverseTokenFilterFactory::new);
tokenFilters.put("asciifolding", ASCIIFoldingTokenFilterFactory::new);
tokenFilters.put("length", LengthTokenFilterFactory::new);
tokenFilters.put("lowercase", LowerCaseTokenFilterFactory::new);
tokenFilters.put("uppercase", UpperCaseTokenFilterFactory::new);
tokenFilters.put("porter_stem", PorterStemTokenFilterFactory::new);
tokenFilters.put("kstem", KStemTokenFilterFactory::new);
tokenFilters.put("standard", StandardTokenFilterFactory::new);
tokenFilters.put("nGram", NGramTokenFilterFactory::new);
tokenFilters.put("ngram", NGramTokenFilterFactory::new);
tokenFilters.put("edgeNGram", EdgeNGramTokenFilterFactory::new);
tokenFilters.put("edge_ngram", EdgeNGramTokenFilterFactory::new);
tokenFilters.put("shingle", ShingleTokenFilterFactory::new);
tokenFilters.put("unique", UniqueTokenFilterFactory::new);
tokenFilters.put("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new));
tokenFilters.put("trim", TrimTokenFilterFactory::new);
tokenFilters.put("limit", LimitTokenCountFilterFactory::new);
tokenFilters.put("common_grams", requriesAnalysisSettings(CommonGramsTokenFilterFactory::new));
tokenFilters.put("snowball", SnowballTokenFilterFactory::new);
tokenFilters.put("stemmer", StemmerTokenFilterFactory::new);
tokenFilters.put("word_delimiter", WordDelimiterTokenFilterFactory::new);
tokenFilters.put("delimited_payload_filter", DelimitedPayloadTokenFilterFactory::new);
tokenFilters.put("elision", ElisionTokenFilterFactory::new);
tokenFilters.put("keep", requriesAnalysisSettings(KeepWordFilterFactory::new));
tokenFilters.put("keep_types", requriesAnalysisSettings(KeepTypesFilterFactory::new));
tokenFilters.put("pattern_capture", requriesAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new));
tokenFilters.put("pattern_replace", requriesAnalysisSettings(PatternReplaceTokenFilterFactory::new));
tokenFilters.put("dictionary_decompounder", requriesAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new));
tokenFilters.put("hyphenation_decompounder", requriesAnalysisSettings(HyphenationCompoundWordTokenFilterFactory::new));
tokenFilters.put("arabic_stem", ArabicStemTokenFilterFactory::new);
tokenFilters.put("brazilian_stem", BrazilianStemTokenFilterFactory::new);
tokenFilters.put("czech_stem", CzechStemTokenFilterFactory::new);
tokenFilters.put("dutch_stem", DutchStemTokenFilterFactory::new);
tokenFilters.put("french_stem", FrenchStemTokenFilterFactory::new);
tokenFilters.put("german_stem", GermanStemTokenFilterFactory::new);
tokenFilters.put("russian_stem", RussianStemTokenFilterFactory::new);
tokenFilters.put("keyword_marker", requriesAnalysisSettings(KeywordMarkerTokenFilterFactory::new));
tokenFilters.put("stemmer_override", requriesAnalysisSettings(StemmerOverrideTokenFilterFactory::new));
tokenFilters.put("arabic_normalization", ArabicNormalizationFilterFactory::new);
tokenFilters.put("german_normalization", GermanNormalizationFilterFactory::new);
tokenFilters.put("hindi_normalization", HindiNormalizationFilterFactory::new);
tokenFilters.put("indic_normalization", IndicNormalizationFilterFactory::new);
tokenFilters.put("sorani_normalization", SoraniNormalizationFilterFactory::new);
tokenFilters.put("persian_normalization", PersianNormalizationFilterFactory::new);
tokenFilters.put("scandinavian_normalization", ScandinavianNormalizationFilterFactory::new);
tokenFilters.put("scandinavian_folding", ScandinavianFoldingFilterFactory::new);
tokenFilters.put("serbian_normalization", SerbianNormalizationFilterFactory::new);
if (hunspellService != null) {
tokenFilters.put("hunspell", requriesAnalysisSettings((indexSettings, env, name, settings) -> new HunspellTokenFilterFactory(indexSettings, name, settings, hunspellService)));
}
tokenFilters.put("cjk_bigram", CJKBigramFilterFactory::new);
tokenFilters.put("cjk_width", CJKWidthFilterFactory::new);
tokenFilters.put("apostrophe", ApostropheFilterFactory::new);
tokenFilters.put("classic", ClassicFilterFactory::new);
tokenFilters.put("decimal_digit", DecimalDigitFilterFactory::new);
}
private void registerBuiltInAnalyzer(Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers) {
analyzers.put("default", StandardAnalyzerProvider::new);
analyzers.put("standard", StandardAnalyzerProvider::new);
analyzers.put("standard_html_strip", StandardHtmlStripAnalyzerProvider::new);
analyzers.put("simple", SimpleAnalyzerProvider::new);
analyzers.put("stop", StopAnalyzerProvider::new);
analyzers.put("whitespace", WhitespaceAnalyzerProvider::new);
analyzers.put("keyword", KeywordAnalyzerProvider::new);
analyzers.put("pattern", PatternAnalyzerProvider::new);
analyzers.put("snowball", SnowballAnalyzerProvider::new);
analyzers.put("arabic", ArabicAnalyzerProvider::new);
analyzers.put("armenian", ArmenianAnalyzerProvider::new);
analyzers.put("basque", BasqueAnalyzerProvider::new);
analyzers.put("brazilian", BrazilianAnalyzerProvider::new);
analyzers.put("bulgarian", BulgarianAnalyzerProvider::new);
analyzers.put("catalan", CatalanAnalyzerProvider::new);
analyzers.put("chinese", ChineseAnalyzerProvider::new);
analyzers.put("cjk", CjkAnalyzerProvider::new);
analyzers.put("czech", CzechAnalyzerProvider::new);
analyzers.put("danish", DanishAnalyzerProvider::new);
analyzers.put("dutch", DutchAnalyzerProvider::new);
analyzers.put("english", EnglishAnalyzerProvider::new);
analyzers.put("finnish", FinnishAnalyzerProvider::new);
analyzers.put("french", FrenchAnalyzerProvider::new);
analyzers.put("galician", GalicianAnalyzerProvider::new);
analyzers.put("german", GermanAnalyzerProvider::new);
analyzers.put("greek", GreekAnalyzerProvider::new);
analyzers.put("hindi", HindiAnalyzerProvider::new);
analyzers.put("hungarian", HungarianAnalyzerProvider::new);
analyzers.put("indonesian", IndonesianAnalyzerProvider::new);
analyzers.put("irish", IrishAnalyzerProvider::new);
analyzers.put("italian", ItalianAnalyzerProvider::new);
analyzers.put("latvian", LatvianAnalyzerProvider::new);
analyzers.put("lithuanian", LithuanianAnalyzerProvider::new);
analyzers.put("norwegian", NorwegianAnalyzerProvider::new);
analyzers.put("persian", PersianAnalyzerProvider::new);
analyzers.put("portuguese", PortugueseAnalyzerProvider::new);
analyzers.put("romanian", RomanianAnalyzerProvider::new);
analyzers.put("russian", RussianAnalyzerProvider::new);
analyzers.put("sorani", SoraniAnalyzerProvider::new);
analyzers.put("spanish", SpanishAnalyzerProvider::new);
analyzers.put("swedish", SwedishAnalyzerProvider::new);
analyzers.put("turkish", TurkishAnalyzerProvider::new);
analyzers.put("thai", ThaiAnalyzerProvider::new);
}
private <T> Map<String, T> buildMapping(boolean analyzer, String toBuild, IndexSettings settings, Map<String, Settings> settingsMap, Map<String, AnalysisModule.AnalysisProvider<T>> providerMap, Map<String, AnalysisModule.AnalysisProvider<T>> defaultInstance) throws IOException {
Settings defaultSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, settings.getIndexVersionCreated()).build();
Map<String, T> factories = new HashMap<>();
for (Map.Entry<String, Settings> entry : settingsMap.entrySet()) {
String name = entry.getKey();
Settings currentSettings = entry.getValue();
String typeName = currentSettings.get("type");
if (analyzer) {
T factory;
if (typeName == null) {
if (currentSettings.get("tokenizer") != null) {
factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
} else {
throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
}
} else if (typeName.equals("custom")) {
factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
} else {
AnalysisModule.AnalysisProvider<T> type = providerMap.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
factory = type.get(settings, environment, name, currentSettings);
}
factories.put(name, factory);
} else {
if (typeName == null) {
throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
}
AnalysisModule.AnalysisProvider<T> type = providerMap.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
final T factory = type.get(settings, environment, name, currentSettings);
factories.put(name, factory);
}
}
// go over the remaining providers and register the ones that are not explicitly configured
for (Map.Entry<String, AnalysisModule.AnalysisProvider<T>> entry : providerMap.entrySet()) {
String name = entry.getKey();
AnalysisModule.AnalysisProvider<T> provider = entry.getValue();
// we don't want to re-register one that already exists
if (settingsMap.containsKey(name)) {
continue;
}
// if the provider requires explicit settings, skip it; the default settings carry none
if (provider.requiresAnalysisSettings()) {
continue;
}
AnalysisModule.AnalysisProvider<T> defaultProvider = defaultInstance.get(name);
final T instance;
if (defaultProvider == null) {
instance = provider.get(settings, environment, name, defaultSettings);
} else {
instance = defaultProvider.get(settings, environment, name, defaultSettings);
}
factories.put(name, instance);
String camelCase = Strings.toCamelCase(name);
if (providerMap.containsKey(camelCase) == false && factories.containsKey(camelCase) == false) {
factories.put(camelCase, instance);
}
}
for (Map.Entry<String, AnalysisModule.AnalysisProvider<T>> entry : defaultInstance.entrySet()) {
final String name = entry.getKey();
final AnalysisModule.AnalysisProvider<T> provider = entry.getValue();
final String camelCase = Strings.toCamelCase(name);
if (factories.containsKey(name) == false || (defaultInstance.containsKey(camelCase) == false && factories.containsKey(camelCase) == false)) {
final T instance = provider.get(settings, environment, name, defaultSettings);
if (factories.containsKey(name) == false) {
factories.put(name, instance);
}
if (defaultInstance.containsKey(camelCase) == false && factories.containsKey(camelCase) == false) {
factories.put(camelCase, instance);
}
}
}
return factories;
}
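For reference, buildMapping is driven by the per-index settings groups (index.analysis.analyzer, index.analysis.tokenizer, index.analysis.filter, index.analysis.char_filter); the "type" key selects the provider. A sketch of the settings shape this method resolves, with illustrative names:
// Illustrative settings: "my_stop" resolves through providerMap.get("stop"),
// while "my_analyzer" goes through the CustomAnalyzerProvider branch above.
Settings indexAnalysis = Settings.builder()
        .put("index.analysis.filter.my_stop.type", "stop")
        .put("index.analysis.filter.my_stop.stopwords", "_english_")
        .put("index.analysis.analyzer.my_analyzer.type", "custom")
        .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
        .put("index.analysis.analyzer.my_analyzer.filter", "my_stop")
        .build();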
private static class PrebuiltAnalysis implements Closeable {
final Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzerProviderFactories;
final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizerFactories;
final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilterFactories;
final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilterFactories;
private PrebuiltAnalysis() {
Map<String, PreBuiltAnalyzerProviderFactory> analyzerProviderFactories = new HashMap<>();
Map<String, PreBuiltTokenizerFactoryFactory> tokenizerFactories = new HashMap<>();
Map<String, PreBuiltTokenFilterFactoryFactory> tokenFilterFactories = new HashMap<>();
Map<String, PreBuiltCharFilterFactoryFactory> charFilterFactories = new HashMap<>();
// Analyzers
for (PreBuiltAnalyzers preBuiltAnalyzerEnum : PreBuiltAnalyzers.values()) {
String name = preBuiltAnalyzerEnum.name().toLowerCase(Locale.ROOT);
analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, AnalyzerScope.INDICES, preBuiltAnalyzerEnum.getAnalyzer(Version.CURRENT)));
}
// Tokenizers
for (PreBuiltTokenizers preBuiltTokenizer : PreBuiltTokenizers.values()) {
String name = preBuiltTokenizer.name().toLowerCase(Locale.ROOT);
tokenizerFactories.put(name, new PreBuiltTokenizerFactoryFactory(preBuiltTokenizer.getTokenizerFactory(Version.CURRENT)));
}
// Tokenizer aliases
tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT)));
tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT)));
// Token filters
for (PreBuiltTokenFilters preBuiltTokenFilter : PreBuiltTokenFilters.values()) {
String name = preBuiltTokenFilter.name().toLowerCase(Locale.ROOT);
tokenFilterFactories.put(name, new PreBuiltTokenFilterFactoryFactory(preBuiltTokenFilter.getTokenFilterFactory(Version.CURRENT)));
}
// Token filter aliases
tokenFilterFactories.put("nGram", new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.NGRAM.getTokenFilterFactory(Version.CURRENT)));
tokenFilterFactories.put("edgeNGram", new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.EDGE_NGRAM.getTokenFilterFactory(Version.CURRENT)));
// Char Filters
for (PreBuiltCharFilters preBuiltCharFilter : PreBuiltCharFilters.values()) {
String name = preBuiltCharFilter.name().toLowerCase(Locale.ROOT);
charFilterFactories.put(name, new PreBuiltCharFilterFactoryFactory(preBuiltCharFilter.getCharFilterFactory(Version.CURRENT)));
}
// Char filter aliases
charFilterFactories.put("htmlStrip", new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT)));
this.analyzerProviderFactories = Collections.unmodifiableMap(analyzerProviderFactories);
this.charFilterFactories = Collections.unmodifiableMap(charFilterFactories);
this.tokenFilterFactories = Collections.unmodifiableMap(tokenFilterFactories);
this.tokenizerFactories = Collections.unmodifiableMap(tokenizerFactories);
}
public AnalysisModule.AnalysisProvider<CharFilterFactory> getCharFilterFactory(String name) {
return charFilterFactories.get(name);
}
public AnalysisModule.AnalysisProvider<TokenFilterFactory> getTokenFilterFactory(String name) {
return tokenFilterFactories.get(name);
}
public AnalysisModule.AnalysisProvider<TokenizerFactory> getTokenizerFactory(String name) {
return tokenizerFactories.get(name);
}
public AnalysisModule.AnalysisProvider<AnalyzerProvider> getAnalyzerProvider(String name) {
return analyzerProviderFactories.get(name);
}
Analyzer analyzer(String name) {
PreBuiltAnalyzerProviderFactory analyzerProviderFactory = (PreBuiltAnalyzerProviderFactory) analyzerProviderFactories.get(name);
if (analyzerProviderFactory == null) {
return null;
}
return analyzerProviderFactory.analyzer();
}
@Override
public void close() throws IOException {
IOUtils.close(analyzerProviderFactories.values().stream().map((a) -> ((PreBuiltAnalyzerProviderFactory)a).analyzer()).collect(Collectors.toList()));
}
}
}
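Lookup order for callers is configured-first with the pre-built components as fallback. A usage sketch, assuming customProviders and prebuiltAnalysis are in scope (both names are illustrative):
// Prefer an index-configured provider; fall back to the node-level pre-built one.
AnalysisModule.AnalysisProvider<TokenFilterFactory> provider = customProviders.get("shingle");
if (provider == null) {
    provider = prebuiltAnalysis.getTokenFilterFactory("shingle");
}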

View File

@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
@ -29,9 +28,9 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@ -51,160 +50,20 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
private final NamedAnalyzer defaultSearchAnalyzer;
private final NamedAnalyzer defaultSearchQuoteAnalyzer;
public AnalysisService(IndexSettings indexSettings) {
this(indexSettings, null, null, null, null, null);
}
@Inject
public AnalysisService(IndexSettings indexSettings, @Nullable IndicesAnalysisService indicesAnalysisService,
@Nullable Map<String, AnalyzerProviderFactory> analyzerFactoryFactories,
@Nullable Map<String, TokenizerFactoryFactory> tokenizerFactoryFactories,
@Nullable Map<String, CharFilterFactoryFactory> charFilterFactoryFactories,
@Nullable Map<String, TokenFilterFactoryFactory> tokenFilterFactoryFactories) {
public AnalysisService(IndexSettings indexSettings,
Map<String, AnalyzerProvider> analyzerProviders,
Map<String, TokenizerFactory> tokenizerFactoryFactories,
Map<String, CharFilterFactory> charFilterFactoryFactories,
Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
super(indexSettings);
Settings defaultSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, indexSettings.getIndexVersionCreated()).build();
Map<String, TokenizerFactory> tokenizers = new HashMap<>();
if (tokenizerFactoryFactories != null) {
Map<String, Settings> tokenizersSettings = this.indexSettings.getSettings().getGroups("index.analysis.tokenizer");
for (Map.Entry<String, TokenizerFactoryFactory> entry : tokenizerFactoryFactories.entrySet()) {
String tokenizerName = entry.getKey();
TokenizerFactoryFactory tokenizerFactoryFactory = entry.getValue();
Settings tokenizerSettings = tokenizersSettings.get(tokenizerName);
if (tokenizerSettings == null) {
tokenizerSettings = defaultSettings;
}
TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, tokenizerSettings);
tokenizers.put(tokenizerName, tokenizerFactory);
tokenizers.put(Strings.toCamelCase(tokenizerName), tokenizerFactory);
}
}
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltTokenizerFactoryFactory> entry : indicesAnalysisService.tokenizerFactories().entrySet()) {
String name = entry.getKey();
if (!tokenizers.containsKey(name)) {
tokenizers.put(name, entry.getValue().create(name, defaultSettings));
}
name = Strings.toCamelCase(entry.getKey());
if (!name.equals(entry.getKey())) {
if (!tokenizers.containsKey(name)) {
tokenizers.put(name, entry.getValue().create(name, defaultSettings));
}
}
}
}
this.tokenizers = unmodifiableMap(tokenizers);
Map<String, CharFilterFactory> charFilters = new HashMap<>();
if (charFilterFactoryFactories != null) {
Map<String, Settings> charFiltersSettings = this.indexSettings.getSettings().getGroups("index.analysis.char_filter");
for (Map.Entry<String, CharFilterFactoryFactory> entry : charFilterFactoryFactories.entrySet()) {
String charFilterName = entry.getKey();
CharFilterFactoryFactory charFilterFactoryFactory = entry.getValue();
Settings charFilterSettings = charFiltersSettings.get(charFilterName);
if (charFilterSettings == null) {
charFilterSettings = defaultSettings;
}
CharFilterFactory tokenFilterFactory = charFilterFactoryFactory.create(charFilterName, charFilterSettings);
charFilters.put(charFilterName, tokenFilterFactory);
charFilters.put(Strings.toCamelCase(charFilterName), tokenFilterFactory);
}
}
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltCharFilterFactoryFactory> entry : indicesAnalysisService.charFilterFactories().entrySet()) {
String name = entry.getKey();
if (!charFilters.containsKey(name)) {
charFilters.put(name, entry.getValue().create(name, defaultSettings));
}
name = Strings.toCamelCase(entry.getKey());
if (!name.equals(entry.getKey())) {
if (!charFilters.containsKey(name)) {
charFilters.put(name, entry.getValue().create(name, defaultSettings));
}
}
}
}
this.charFilters = unmodifiableMap(charFilters);
Map<String, TokenFilterFactory> tokenFilters = new HashMap<>();
if (tokenFilterFactoryFactories != null) {
Map<String, Settings> tokenFiltersSettings = this.indexSettings.getSettings().getGroups("index.analysis.filter");
for (Map.Entry<String, TokenFilterFactoryFactory> entry : tokenFilterFactoryFactories.entrySet()) {
String tokenFilterName = entry.getKey();
TokenFilterFactoryFactory tokenFilterFactoryFactory = entry.getValue();
Settings tokenFilterSettings = tokenFiltersSettings.get(tokenFilterName);
if (tokenFilterSettings == null) {
tokenFilterSettings = defaultSettings;
}
TokenFilterFactory tokenFilterFactory = tokenFilterFactoryFactory.create(tokenFilterName, tokenFilterSettings);
tokenFilters.put(tokenFilterName, tokenFilterFactory);
tokenFilters.put(Strings.toCamelCase(tokenFilterName), tokenFilterFactory);
}
}
// pre initialize the globally registered ones into the map
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltTokenFilterFactoryFactory> entry : indicesAnalysisService.tokenFilterFactories().entrySet()) {
String name = entry.getKey();
if (!tokenFilters.containsKey(name)) {
tokenFilters.put(name, entry.getValue().create(name, defaultSettings));
}
name = Strings.toCamelCase(entry.getKey());
if (!name.equals(entry.getKey())) {
if (!tokenFilters.containsKey(name)) {
tokenFilters.put(name, entry.getValue().create(name, defaultSettings));
}
}
}
}
this.tokenFilters = unmodifiableMap(tokenFilters);
Map<String, AnalyzerProvider> analyzerProviders = new HashMap<>();
if (analyzerFactoryFactories != null) {
Map<String, Settings> analyzersSettings = this.indexSettings.getSettings().getGroups("index.analysis.analyzer");
for (Map.Entry<String, AnalyzerProviderFactory> entry : analyzerFactoryFactories.entrySet()) {
String analyzerName = entry.getKey();
AnalyzerProviderFactory analyzerFactoryFactory = entry.getValue();
Settings analyzerSettings = analyzersSettings.get(analyzerName);
if (analyzerSettings == null) {
analyzerSettings = defaultSettings;
}
AnalyzerProvider analyzerFactory = analyzerFactoryFactory.create(analyzerName, analyzerSettings);
analyzerProviders.put(analyzerName, analyzerFactory);
}
}
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltAnalyzerProviderFactory> entry : indicesAnalysisService.analyzerProviderFactories().entrySet()) {
String name = entry.getKey();
Version indexVersion = indexSettings.getIndexVersionCreated();
if (!analyzerProviders.containsKey(name)) {
analyzerProviders.put(name, entry.getValue().create(name, Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build()));
}
String camelCaseName = Strings.toCamelCase(name);
if (!camelCaseName.equals(entry.getKey()) && !analyzerProviders.containsKey(camelCaseName)) {
analyzerProviders.put(camelCaseName, entry.getValue().create(name, Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build()));
}
}
}
this.tokenizers = unmodifiableMap(tokenizerFactoryFactories);
this.charFilters = unmodifiableMap(charFilterFactoryFactories);
this.tokenFilters = unmodifiableMap(tokenFilterFactoryFactories);
analyzerProviders = new HashMap<>(analyzerProviders);
if (!analyzerProviders.containsKey("default")) {
analyzerProviders.put("default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS));
}
if (!analyzerProviders.containsKey("default_index")) {
analyzerProviders.put("default_index", analyzerProviders.get("default"));
}
if (!analyzerProviders.containsKey("default_search")) {
analyzerProviders.put("default_search", analyzerProviders.get("default"));
}
@ -213,7 +72,9 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
}
Map<String, NamedAnalyzer> analyzers = new HashMap<>();
for (AnalyzerProvider analyzerFactory : analyzerProviders.values()) {
for (Map.Entry<String, AnalyzerProvider> entry : analyzerProviders.entrySet()) {
AnalyzerProvider analyzerFactory = entry.getValue();
String name = entry.getKey();
/*
* Lucene defaults positionIncrementGap to 0 in all analyzers but
* Elasticsearch defaults them to 0 only before version 2.0
@ -245,10 +106,12 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
}
} else {
analyzer = new NamedAnalyzer(analyzerFactory.name(), analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
}
analyzers.put(analyzerFactory.name(), analyzer);
analyzers.put(Strings.toCamelCase(analyzerFactory.name()), analyzer);
if (analyzers.containsKey(name)) {
throw new IllegalStateException("already registered analyzer with name: " + name);
}
analyzers.put(name, analyzer);
String strAliases = this.indexSettings.getSettings().get("index.analysis.analyzer." + analyzerFactory.name() + ".alias");
if (strAliases != null) {
for (String alias : Strings.commaDelimitedListToStringArray(strAliases)) {

View File

@ -1,32 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import java.lang.annotation.*;
/**
* A marker annotation on {@link CharFilterFactory}, {@link AnalyzerProvider}, {@link TokenFilterFactory},
* or {@link TokenizerFactory} which will cause the provider/factory to only be created when explicit settings
* are provided.
*/
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface AnalysisSettingsRequired {
}
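The deleted annotation's contract now lives on the provider itself: requiresAnalysisSettings() defaults to false, and a wrapper flips it, as with the hunspell registration above. A sketch consistent with those call sites, not a verbatim excerpt of this commit:
// Wrap a provider so requiresAnalysisSettings() reports true; buildMapping then
// never creates a default-settings instance for it.
static <T> AnalysisModule.AnalysisProvider<T> requiresAnalysisSettings(AnalysisModule.AnalysisProvider<T> delegate) {
    return new AnalysisModule.AnalysisProvider<T>() {
        @Override
        public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
            return delegate.get(indexSettings, environment, name, settings);
        }
        @Override
        public boolean requiresAnalysisSettings() {
            return true;
        }
    };
}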

View File

@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.elasticsearch.common.settings.Settings;
/**
*
*/
public interface AnalyzerProviderFactory {
AnalyzerProvider create(String name, Settings settings);
}

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tr.ApostropheFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ApostropheFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ApostropheFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ApostropheFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}
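The remaining files repeat this mechanical change: drop @Inject/@Assisted and adopt the uniform constructor shape, so a plain constructor reference satisfies AnalysisProvider. The shared shape, shown with an illustrative class name:
// Every factory now takes (IndexSettings, Environment, String, Settings),
// which is exactly the AnalysisProvider.get(...) signature; registration
// then reduces to: tokenFilters.put("my_filter", MyFilterFactory::new);
public MyFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
}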

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arabic
private final ArabicAnalyzer arabicAnalyzer;
@Inject
public ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
arabicAnalyzer = new ArabicAnalyzer(Analysis.parseStopWords(env, settings, ArabicAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ArabicNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ArabicNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ArabicNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -31,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ArabicStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ArabicStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ArabicStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.hy.ArmenianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arme
private final ArmenianAnalyzer analyzer;
@Inject
public ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new ArmenianAnalyzer(Analysis.parseStopWords(env, settings, ArmenianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.eu.BasqueAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider<Basque
private final BasqueAnalyzer analyzer;
@Inject
public BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new BasqueAnalyzer(Analysis.parseStopWords(env, settings, BasqueAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bra
private final BrazilianAnalyzer analyzer;
@Inject
public BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new BrazilianAnalyzer(Analysis.parseStopWords(env, settings, BrazilianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.br.BrazilianStemFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
*
@ -34,8 +33,7 @@ public class BrazilianStemTokenFilterFactory extends AbstractTokenFilterFactory
private final CharArraySet exclusions;
@Inject
public BrazilianStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public BrazilianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bul
private final BulgarianAnalyzer analyzer;
@Inject
public BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new BulgarianAnalyzer(Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKBigramFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.util.Arrays;
@ -49,8 +48,7 @@ public final class CJKBigramFilterFactory extends AbstractTokenFilterFactory {
private final int flags;
private final boolean outputUnigrams;
@Inject
public CJKBigramFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public CJKBigramFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
outputUnigrams = settings.getAsBoolean("output_unigrams", false);
final String[] asArray = settings.getAsArray("ignored_scripts");

View File

@ -21,14 +21,13 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKWidthFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
public final class CJKWidthFilterFactory extends AbstractTokenFilterFactory {
@Inject
public CJKWidthFilterFactory(IndexSettings indexSettings, String name, Settings settings) {
public CJKWidthFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ca.CatalanAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<Catal
private final CatalanAnalyzer analyzer;
@Inject
public CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new CatalanAnalyzer(Analysis.parseStopWords(env, settings, CatalanAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.elasticsearch.common.settings.Settings;
/**
*
*/
public interface CharFilterFactoryFactory {
CharFilterFactory create(String name, Settings settings);
}

View File

@ -20,9 +20,8 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -32,8 +31,7 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stand
private final StandardAnalyzer analyzer;
@Inject
public ChineseAnalyzerProvider(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
// old index: best effort
analyzer = new StandardAnalyzer();

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider<CJKAnalyz
private final CJKAnalyzer analyzer;
@Inject
public CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
CharArraySet stopWords = Analysis.parseStopWords(env, settings, CJKAnalyzer.getDefaultStopSet());

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.ClassicFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ClassicFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ClassicFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ClassicFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -22,9 +22,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.ClassicTokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -34,8 +33,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;
@Inject
public ClassicTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -23,8 +23,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
import org.apache.lucene.analysis.commongrams.CommonGramsQueryFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -32,7 +30,6 @@ import org.elasticsearch.index.IndexSettings;
/**
*
*/
@AnalysisSettingsRequired
public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet words;
@ -41,8 +38,7 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
private final boolean queryMode;
@Inject
public CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.ignoreCase = settings.getAsBoolean("ignore_case", false);
this.queryMode = settings.getAsBoolean("query_mode", false);

View File

@ -20,8 +20,6 @@
package org.elasticsearch.index.analysis;
import org.elasticsearch.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
@ -39,9 +37,8 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
private CustomAnalyzer customAnalyzer;
@Inject
public CustomAnalyzerProvider(IndexSettings indexSettings,
@Assisted String name, @Assisted Settings settings) {
String name, Settings settings) {
super(indexSettings, name, settings);
this.analyzerSettings = settings;
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.cz.CzechAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider<CzechAn
private final CzechAnalyzer analyzer;
@Inject
public CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new CzechAnalyzer(Analysis.parseStopWords(env, settings, CzechAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -20,15 +20,13 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cz.CzechStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
public class CzechStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public CzechStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public CzechStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.da.DanishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Danish
private final DanishAnalyzer analyzer;
@Inject
public DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new DanishAnalyzer(Analysis.parseStopWords(env, settings, DanishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public final class DecimalDigitFilterFactory extends AbstractTokenFilterFactory {
@Inject
public DecimalDigitFilterFactory(IndexSettings indexSettings, String name, Settings settings) {
public DecimalDigitFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.payloads.*;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -41,9 +39,8 @@ public class DelimitedPayloadTokenFilterFactory extends AbstractTokenFilterFacto
char delimiter;
PayloadEncoder encoder;
@Inject
public DelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name,
@Assisted Settings settings) {
public DelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name,
Settings settings) {
super(indexSettings, name, settings);
String delimiterConf = settings.get(DELIMITER);
if (delimiterConf != null) {

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider<DutchAn
private final DutchAnalyzer analyzer;
@Inject
public DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new DutchAnalyzer(Analysis.parseStopWords(env, settings, DutchAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.tartarus.snowball.ext.DutchStemmer;
@ -36,8 +35,7 @@ public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet exclusions;
@Inject
public DutchStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public DutchStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -25,9 +25,8 @@ import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -46,8 +45,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
private org.elasticsearch.Version esVersion;
@Inject
public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);

View File

@ -24,9 +24,8 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars;
@ -48,8 +47,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
protected org.elasticsearch.Version esVersion;
@Inject
public EdgeNGramTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);

View File

@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ElisionFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -35,8 +33,7 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet articles;
@Inject
public ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.articles = Analysis.parseArticles(env, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Engli
private final EnglishAnalyzer analyzer;
@Inject
public EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new EnglishAnalyzer(Analysis.parseStopWords(env, settings, EnglishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.fi.FinnishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Finni
private final FinnishAnalyzer analyzer;
@Inject
public FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new FinnishAnalyzer(Analysis.parseStopWords(env, settings, FinnishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider<French
private final FrenchAnalyzer analyzer;
@Inject
public FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new FrenchAnalyzer(Analysis.parseStopWords(env, settings, FrenchAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.tartarus.snowball.ext.FrenchStemmer;
@ -36,8 +35,7 @@ public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet exclusions;
@Inject
public FrenchStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public FrenchStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.gl.GalicianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Gali
private final GalicianAnalyzer analyzer;
@Inject
public GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new GalicianAnalyzer(Analysis.parseStopWords(env, settings, GalicianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider<German
private final GermanAnalyzer analyzer;
@Inject
public GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new GermanAnalyzer(Analysis.parseStopWords(env, settings, GermanAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.de.GermanNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class GermanNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public GermanNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public GermanNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.de.GermanStemFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -35,8 +34,7 @@ public class GermanStemTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet exclusions;
@Inject
public GermanStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public GermanStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -20,8 +20,6 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.el.GreekAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -33,8 +31,7 @@ public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider<GreekAn
private final GreekAnalyzer analyzer;
@Inject
public GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new GreekAnalyzer(Analysis.parseStopWords(env, settings, GreekAnalyzer.getDefaultStopSet()));
analyzer.setVersion(version);

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.hi.HindiAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider<HindiAn
private final HindiAnalyzer analyzer;
@Inject
public HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new HindiAnalyzer(Analysis.parseStopWords(env, settings, HindiAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.hi.HindiNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class HindiNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public HindiNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public HindiNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,8 +21,9 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.io.Reader;
@ -34,8 +35,7 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
public class HtmlStripCharFilterFactory extends AbstractCharFilterFactory {
private final Set<String> escapedTags;
@Inject
public HtmlStripCharFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public HtmlStripCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name);
String[] escapedTags = settings.getAsArray("escaped_tags");
if (escapedTags.length > 0) {


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.hu.HungarianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Hun
private final HungarianAnalyzer analyzer;
@Inject
public HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new HungarianAnalyzer(Analysis.parseStopWords(env, settings, HungarianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -21,23 +21,19 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.HunspellService;
import java.util.Locale;
@AnalysisSettingsRequired
public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory {
private final Dictionary dictionary;
private final boolean dedup;
private final boolean longestOnly;
@Inject
public HunspellTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings, HunspellService hunspellService) {
public HunspellTokenFilterFactory(IndexSettings indexSettings, String name, Settings settings, HunspellService hunspellService) {
super(indexSettings, name, settings);
String locale = settings.get("locale", settings.get("language", settings.get("lang", null)));
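
HunspellTokenFilterFactory is the one component here with an extra dependency (HunspellService), which no longer arrives via guice. With providers as functional interfaces, such a dependency can be closed over by a capturing lambda; the sketch below assumes a registerTokenFilter hook of this shape on AnalysisModule, named for illustration only:

// Sketch under the assumption that AnalysisModule exposes a registration
// hook taking a name plus an AnalysisProvider<TokenFilterFactory>.
void registerHunspell(AnalysisModule module, HunspellService hunspellService) {
    module.registerTokenFilter("hunspell", (indexSettings, env, name, settings) ->
            new HunspellTokenFilterFactory(indexSettings, name, settings, hunspellService));
}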


@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.in.IndicNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class IndicNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public IndicNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public IndicNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.id.IndonesianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider<In
private final IndonesianAnalyzer analyzer;
@Inject
public IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new IndonesianAnalyzer(Analysis.parseStopWords(env, settings, IndonesianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ga.IrishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class IrishAnalyzerProvider extends AbstractIndexAnalyzerProvider<IrishAn
private final IrishAnalyzer analyzer;
@Inject
public IrishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public IrishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new IrishAnalyzer(Analysis.parseStopWords(env, settings, IrishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.it.ItalianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Itali
private final ItalianAnalyzer analyzer;
@Inject
public ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new ItalianAnalyzer(Analysis.parseStopWords(env, settings, ItalianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -21,15 +21,13 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.en.KStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
public class KStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public KStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public KStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.TypeTokenFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import java.util.Arrays;
@ -41,14 +39,12 @@ import java.util.Set;
* <li>{@value #KEEP_TYPES_KEY} the array of words / tokens to keep.</li>
* </ul>
*/
@AnalysisSettingsRequired
public class KeepTypesFilterFactory extends AbstractTokenFilterFactory {
private final Set<String> keepTypes;
private static final String KEEP_TYPES_KEY = "types";
@Inject
public KeepTypesFilterFactory(IndexSettings indexSettings,
Environment env, @Assisted String name, @Assisted Settings settings) {
Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
final String[] arrayKeepTypes = settings.getAsArray(KEEP_TYPES_KEY, null);
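
The removal of @AnalysisSettingsRequired here (and on several factories below) follows from the same design: a marker annotation read reflectively has no place once components are created through plain providers. A plausible shape for the replacement, assuming the flag moved onto the provider itself:

// Assumption for illustration: "requires analysis settings" becomes an
// ordinary method on AnalysisModule.AnalysisProvider instead of a
// reflective class annotation on the factory.
AnalysisModule.AnalysisProvider<TokenFilterFactory> keepTypes =
        new AnalysisModule.AnalysisProvider<TokenFilterFactory>() {
            @Override
            public TokenFilterFactory get(IndexSettings indexSettings, Environment env,
                                          String name, Settings settings) {
                return new KeepTypesFilterFactory(indexSettings, env, name, settings);
            }
            @Override
            public boolean requiresAnalysisSettings() {
                return true; // fail fast when no "types" setting is supplied
            }
        };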


@ -24,10 +24,8 @@ import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
/**
@ -51,7 +49,6 @@ import org.elasticsearch.index.IndexSettings;
*
* @see StopTokenFilterFactory
*/
@AnalysisSettingsRequired
public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet keepWords;
private final boolean enablePositionIncrements;
@ -60,9 +57,8 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc
private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
@Inject
public KeepWordFilterFactory(IndexSettings indexSettings,
Environment env, @Assisted String name, @Assisted Settings settings) {
Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
final String[] arrayKeepWords = settings.getAsArray(KEEP_WORDS_KEY, null);


@ -20,9 +20,8 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -32,8 +31,7 @@ public class KeywordAnalyzerProvider extends AbstractIndexAnalyzerProvider<Keywo
private final KeywordAnalyzer keywordAnalyzer;
@Inject
public KeywordAnalyzerProvider(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public KeywordAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.keywordAnalyzer = new KeywordAnalyzer();
}


@ -22,21 +22,17 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import java.util.Set;
@AnalysisSettingsRequired
public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet keywordLookup;
@Inject
public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
boolean ignoreCase = settings.getAsBoolean("ignore_case", false);


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -33,8 +32,7 @@ public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
private final int bufferSize;
@Inject
public KeywordTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
bufferSize = settings.getAsInt("buffer_size", 256);
}


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.lv.LatvianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Latvi
private final LatvianAnalyzer analyzer;
@Inject
public LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new LatvianAnalyzer(Analysis.parseStopWords(env, settings, LatvianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.LengthFilter;
import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -38,8 +37,7 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory {
private final boolean enablePositionIncrements;
private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
@Inject
public LengthTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
min = settings.getAsInt("min", 0);
max = settings.getAsInt("max", Integer.MAX_VALUE);


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -31,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class LetterTokenizerFactory extends AbstractTokenizerFactory {
@Inject
public LetterTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -38,8 +36,7 @@ public class LimitTokenCountFilterFactory extends AbstractTokenFilterFactory {
final int maxTokenCount;
final boolean consumeAllTokens;
@Inject
public LimitTokenCountFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public LimitTokenCountFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.maxTokenCount = settings.getAsInt("max_token_count", DEFAULT_MAX_TOKEN_COUNT);
this.consumeAllTokens = settings.getAsBoolean("consume_all_tokens", DEFAULT_CONSUME_ALL_TOKENS);


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.lt.LithuanianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Li
private final LithuanianAnalyzer analyzer;
@Inject
public LithuanianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public LithuanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new LithuanianAnalyzer(Analysis.parseStopWords(env, settings, LithuanianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -24,9 +24,8 @@ import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.el.GreekLowerCaseFilter;
import org.apache.lucene.analysis.ga.IrishLowerCaseFilter;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -42,8 +41,7 @@ public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory {
private final String lang;
@Inject
public LowerCaseTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public LowerCaseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.lang = settings.get("language", null);
}


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -31,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory {
@Inject
public LowerCaseTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.charfilter.MappingCharFilter;
import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -32,13 +30,11 @@ import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@AnalysisSettingsRequired
public class MappingCharFilterFactory extends AbstractCharFilterFactory {
private final NormalizeCharMap normMap;
@Inject
public MappingCharFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public MappingCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name);
List<String> rules = Analysis.getWordList(env, settings, "mappings");


@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.Lucene43NGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -39,8 +38,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
private final int maxGram;
@Inject
public NGramTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);


@ -23,9 +23,8 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.lang.reflect.Field;
@ -88,8 +87,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
return builder.build();
}
@Inject
public NGramTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.no.NorwegianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Nor
private final NorwegianAnalyzer analyzer;
@Inject
public NorwegianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public NorwegianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new NorwegianAnalyzer(Analysis.parseStopWords(env, settings, NorwegianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -22,9 +22,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
@ -36,8 +35,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
private final int skip;
private final boolean reverse;
@Inject
public PathHierarchyTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
bufferSize = settings.getAsInt("buffer_size", 1024);
String delimiter = settings.get("delimiter");


@ -23,8 +23,6 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -39,8 +37,7 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analy
private final PatternAnalyzer analyzer;
@Inject
public PatternAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public PatternAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
Version esVersion = indexSettings.getIndexVersionCreated();


@ -22,23 +22,19 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.pattern.PatternCaptureGroupTokenFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import java.util.regex.Pattern;
@AnalysisSettingsRequired
public class PatternCaptureGroupTokenFilterFactory extends AbstractTokenFilterFactory {
private final Pattern[] patterns;
private final boolean preserveOriginal;
private static final String PATTERNS_KEY = "patterns";
private static final String PRESERVE_ORIG_KEY = "preserve_original";
@Inject
public PatternCaptureGroupTokenFilterFactory(IndexSettings indexSettings, @Assisted String name,
@Assisted Settings settings) {
public PatternCaptureGroupTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
String[] regexes = settings.getAsArray(PATTERNS_KEY, null, false);
if (regexes == null) {


@ -20,22 +20,19 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.io.Reader;
import java.util.regex.Pattern;
@AnalysisSettingsRequired
public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
private final Pattern pattern;
private final String replacement;
@Inject
public PatternReplaceCharFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public PatternReplaceCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name);
if (!Strings.hasLength(settings.get("pattern"))) {


@ -21,23 +21,20 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.pattern.PatternReplaceFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import java.util.regex.Pattern;
@AnalysisSettingsRequired
public class PatternReplaceTokenFilterFactory extends AbstractTokenFilterFactory {
private final Pattern pattern;
private final String replacement;
private final boolean all;
@Inject
public PatternReplaceTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public PatternReplaceTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
String sPattern = settings.get("pattern", null);


@ -21,10 +21,9 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.pattern.PatternTokenizer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.util.regex.Pattern;
@ -34,8 +33,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory {
private final Pattern pattern;
private final int group;
@Inject
public PatternTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);


@ -20,8 +20,6 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.fa.PersianAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -33,8 +31,7 @@ public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Persi
private final PersianAnalyzer analyzer;
@Inject
public PersianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public PersianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new PersianAnalyzer(Analysis.parseStopWords(env, settings, PersianAnalyzer.getDefaultStopSet()));
analyzer.setVersion(version);


@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class PersianNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public PersianNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public PersianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -31,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class PorterStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public PorterStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public PorterStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.pt.PortugueseAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Po
private final PortugueseAnalyzer analyzer;
@Inject
public PortugueseAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public PortugueseAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new PortugueseAnalyzer(Analysis.parseStopWords(env, settings, PortugueseAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -22,12 +22,17 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
import java.io.IOException;
/**
*
*/
public class PreBuiltAnalyzerProviderFactory implements AnalyzerProviderFactory {
public class PreBuiltAnalyzerProviderFactory implements AnalysisModule.AnalysisProvider<AnalyzerProvider> {
private final PreBuiltAnalyzerProvider analyzerProvider;
@ -35,7 +40,6 @@ public class PreBuiltAnalyzerProviderFactory implements AnalyzerProviderFactory
analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer);
}
@Override
public AnalyzerProvider create(String name, Settings settings) {
Version indexVersion = Version.indexCreated(settings);
if (!Version.CURRENT.equals(indexVersion)) {
@ -49,6 +53,11 @@ public class PreBuiltAnalyzerProviderFactory implements AnalyzerProviderFactory
return analyzerProvider;
}
@Override
public AnalyzerProvider get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
return create(name, settings);
}
public Analyzer analyzer() {
return analyzerProvider.get();
}


@ -21,9 +21,14 @@ package org.elasticsearch.index.analysis;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.PreBuiltCharFilters;
public class PreBuiltCharFilterFactoryFactory implements CharFilterFactoryFactory {
import java.io.IOException;
public class PreBuiltCharFilterFactoryFactory implements AnalysisModule.AnalysisProvider<CharFilterFactory> {
private final CharFilterFactory charFilterFactory;
@ -32,7 +37,7 @@ public class PreBuiltCharFilterFactoryFactory implements CharFilterFactoryFactor
}
@Override
public CharFilterFactory create(String name, Settings settings) {
public CharFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
Version indexVersion = Version.indexCreated(settings);
if (!Version.CURRENT.equals(indexVersion)) {
PreBuiltCharFilters preBuiltCharFilters = PreBuiltCharFilters.getOrDefault(name, null);


@ -21,9 +21,14 @@ package org.elasticsearch.index.analysis;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;
public class PreBuiltTokenFilterFactoryFactory implements TokenFilterFactoryFactory {
import java.io.IOException;
public class PreBuiltTokenFilterFactoryFactory implements AnalysisModule.AnalysisProvider<TokenFilterFactory> {
private final TokenFilterFactory tokenFilterFactory;
@ -32,7 +37,7 @@ public class PreBuiltTokenFilterFactoryFactory implements TokenFilterFactoryFact
}
@Override
public TokenFilterFactory create(String name, Settings settings) {
public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
Version indexVersion = Version.indexCreated(settings);
if (!Version.CURRENT.equals(indexVersion)) {
PreBuiltTokenFilters preBuiltTokenFilters = PreBuiltTokenFilters.getOrDefault(name, null);


@ -21,9 +21,14 @@ package org.elasticsearch.index.analysis;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
public class PreBuiltTokenizerFactoryFactory implements TokenizerFactoryFactory {
import java.io.IOException;
public class PreBuiltTokenizerFactoryFactory implements AnalysisModule.AnalysisProvider<TokenizerFactory> {
private final TokenizerFactory tokenizerFactory;
@ -31,8 +36,7 @@ public class PreBuiltTokenizerFactoryFactory implements TokenizerFactoryFactory
this.tokenizerFactory = tokenizerFactory;
}
@Override
public TokenizerFactory create(String name, Settings settings) {
public TokenizerFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
Version indexVersion = Version.indexCreated(settings);
if (!Version.CURRENT.equals(indexVersion)) {
PreBuiltTokenizers preBuiltTokenizers = PreBuiltTokenizers.getOrDefault(name, null);
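
All four PreBuilt*FactoryFactory classes now implement AnalysisModule.AnalysisProvider<T>, whose single method get(IndexSettings, Environment, String, Settings) deliberately mirrors the uniform constructor shape above. The payoff is that an ordinary factory registers as a bare constructor reference; a minimal sketch:

// Because the constructor matches AnalysisProvider.get argument for argument,
// a constructor reference is itself a valid provider: no class objects are
// passed around and nothing is instantiated reflectively at runtime.
AnalysisModule.AnalysisProvider<TokenFilterFactory> kstem = KStemTokenFilterFactory::new;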


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -31,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ReverseTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ReverseTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ReverseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ro.RomanianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Roma
private final RomanianAnalyzer analyzer;
@Inject
public RomanianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public RomanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new RomanianAnalyzer(Analysis.parseStopWords(env, settings, RomanianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ru.RussianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Russi
private final RussianAnalyzer analyzer;
@Inject
public RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new RussianAnalyzer(Analysis.parseStopWords(env, settings, RussianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -31,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class RussianStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public RussianStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public RussianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ScandinavianFoldingFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ScandinavianFoldingFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ScandinavianNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ScandinavianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,17 +21,15 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.sr.SerbianNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
*
*/
public class SerbianNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public SerbianNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public SerbianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -33,8 +32,7 @@ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
private final Factory factory;
@Inject
public ShingleTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
Integer maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
Integer minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);


@ -20,9 +20,8 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -32,8 +31,7 @@ public class SimpleAnalyzerProvider extends AbstractIndexAnalyzerProvider<Simple
private final SimpleAnalyzer simpleAnalyzer;
@Inject
public SimpleAnalyzerProvider(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public SimpleAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.simpleAnalyzer = new SimpleAnalyzer();
this.simpleAnalyzer.setVersion(version);


@ -23,8 +23,6 @@ import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -62,8 +60,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider<Snow
private final SnowballAnalyzer analyzer;
@Inject
public SnowballAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public SnowballAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
String language = settings.get("language", settings.get("name", "English"));


@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -34,8 +33,7 @@ public class SnowballTokenFilterFactory extends AbstractTokenFilterFactory {
private String language;
@Inject
public SnowballTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public SnowballTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.language = Strings.capitalize(settings.get("language", settings.get("name", "English")));
}


@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ckb.SoraniAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class SoraniAnalyzerProvider extends AbstractIndexAnalyzerProvider<Sorani
private final SoraniAnalyzer analyzer;
@Inject
public SoraniAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public SoraniAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new SoraniAnalyzer(Analysis.parseStopWords(env, settings, SoraniAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class SoraniNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public SoraniNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public SoraniNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

Some files were not shown because too many files have changed in this diff Show More