Remove AnalysisService and reduce it to a simple name-to-analyzer mapping (#20627)

Today we hold on to all possible tokenizers, token filters, etc. when we create
an index service on a node. This was mainly done to allow the `_analyze` API to
directly access all these primitives. We fixed this in #19827 and can now get rid of
the AnalysisService entirely, replacing it with a simple map-like class. This
ensures we don't create a gazillion long-lived objects that are never used in
most indices; those objects can also consume a considerable amount of memory,
since they may load stopwords, synonyms, etc.
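
As a rough sketch (not part of the diff itself; the analyzer name is made up),
callers move from the old AnalysisService API to the new map-like one roughly as follows:

IndexAnalyzers indexAnalyzers = analysisRegistry.build(indexSettings); // was: new AnalysisService(...)
NamedAnalyzer custom = indexAnalyzers.get("my_analyzer");              // was: analysisService.analyzer("my_analyzer")
NamedAnalyzer byDefault = indexAnalyzers.getDefaultIndexAnalyzer();    // was: analysisService.defaultIndexAnalyzer()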

Closes #19828
Simon Willnauer 2016-09-23 08:53:50 +02:00 committed by GitHub
parent e3b7b4f032
commit fe1803c957
78 changed files with 824 additions and 780 deletions

View File

@@ -342,7 +342,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicyConfig.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLog.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CommonGramsTokenFilterFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CustomAnalyzerProvider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]NumericDoubleAnalyzer.java" checks="LineLength" />

View File

@@ -45,9 +45,9 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
@@ -145,45 +145,46 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
}
}
final AnalysisRegistry analysisRegistry = indicesService.getAnalysis();
return analyze(request, field, analyzer, indexService != null ? indexService.analysisService() : null, analysisRegistry, environment);
return analyze(request, field, analyzer, indexService != null ? indexService.getIndexAnalyzers() : null, analysisRegistry, environment);
} catch (IOException e) {
throw new ElasticsearchException("analysis failed", e);
}
}
public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, AnalysisService analysisService, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, IndexAnalyzers indexAnalyzers, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
boolean closeAnalyzer = false;
if (analyzer == null && request.analyzer() != null) {
if (analysisService == null) {
if (indexAnalyzers == null) {
analyzer = analysisRegistry.getAnalyzer(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find global analyzer [" + request.analyzer() + "]");
}
} else {
analyzer = analysisService.analyzer(request.analyzer());
analyzer = indexAnalyzers.get(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
}
}
} else if (request.tokenizer() != null) {
TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, analysisService, analysisRegistry, environment);
final IndexSettings indexSettings = indexAnalyzers == null ? null : indexAnalyzers.getIndexSettings();
TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers, analysisRegistry, environment);
TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
tokenFilterFactories = getTokenFilterFactories(request, analysisService, analysisRegistry, environment, tokenFilterFactories);
tokenFilterFactories = getTokenFilterFactories(request, indexSettings, analysisRegistry, environment, tokenFilterFactories);
CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
charFilterFactories = getCharFilterFactories(request, analysisService, analysisRegistry, environment, charFilterFactories);
charFilterFactories = getCharFilterFactories(request, indexSettings, analysisRegistry, environment, charFilterFactories);
analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
closeAnalyzer = true;
} else if (analyzer == null) {
if (analysisService == null) {
if (indexAnalyzers == null) {
analyzer = analysisRegistry.getAnalyzer("standard");
} else {
analyzer = analysisService.defaultIndexAnalyzer();
analyzer = indexAnalyzers.getDefaultIndexAnalyzer();
}
}
if (analyzer == null) {
@@ -446,7 +447,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return extendedAttributes;
}
private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, CharFilterFactory[] charFilterFactories) throws IOException {
if (request.charFilters() != null && request.charFilters().size() > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().size()];
@@ -468,19 +469,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
charFilterFactories[i] = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter_[" + i + "]", settings);
} else {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory;
if (analysisService == null) {
if (indexSettings == null) {
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilter.name);
} else {
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name, analysisService.getIndexSettings());
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name, indexSettings);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(analysisService.getIndexSettings(), environment, charFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
charFilterFactories[i] = charFilterFactoryFactory.get(indexSettings, environment, charFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_CHAR_FILTER + "." + charFilter.name));
}
}
@@ -492,7 +493,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return charFilterFactories;
}
private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, TokenFilterFactory[] tokenFilterFactories) throws IOException {
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()];
@@ -514,19 +515,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter_[" + i + "]", settings);
} else {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory;
if (analysisService == null) {
if (indexSettings == null) {
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
} else {
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name, analysisService.getIndexSettings());
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name, indexSettings);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(analysisService.getIndexSettings(), environment, tokenFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." + tokenFilter.name));
}
}
@@ -538,7 +539,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return tokenFilterFactories;
}
private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, AnalysisService analysisService,
private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalyzers,
AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
TokenizerFactory tokenizerFactory;
final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
@@ -558,19 +559,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
} else {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory;
if (analysisService == null) {
if (indexAnalyzers == null) {
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
} else {
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, analysisService.getIndexSettings());
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, indexAnalyzers.getIndexSettings());
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(analysisService.getIndexSettings(), environment, tokenizer.name,
AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
tokenizerFactory = tokenizerFactoryFactory.get(indexAnalyzers.getIndexSettings(), environment, tokenizer.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexAnalyzers.getIndexSettings(),
AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." + tokenizer.name));
}
}

View File

@@ -26,13 +26,16 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import java.util.AbstractMap;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
/**
* This service is responsible for upgrading legacy index metadata to the current version
@@ -112,9 +115,30 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
final NamedAnalyzer fakeDefault = new NamedAnalyzer("fake_default", new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
throw new UnsupportedOperationException("shouldn't be here");
}
});
// this is just a fake map that always returns the same value for any possible string key
// also the entrySet impl isn't fully correct but we implement it since internally
// IndexAnalyzers will iterate over all analyzers to close them.
final Map<String, NamedAnalyzer> analyzerMap = new AbstractMap<String, NamedAnalyzer>() {
@Override
public NamedAnalyzer get(Object key) {
assert key instanceof String : "key must be a string but was: " + key.getClass();
return new NamedAnalyzer((String)key, fakeDefault.analyzer());
}
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null);
@Override
public Set<Entry<String, NamedAnalyzer>> entrySet() {
// just to ensure we can iterate over this single analyzer
return Collections.singletonMap(fakeDefault.name(), fakeDefault).entrySet();
}
};
try (IndexAnalyzers fakeIndexAnalyzers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap)) {
MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalyzers, similarityService, mapperRegistry, () -> null);
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
@@ -134,34 +158,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
return IndexMetaData.builder(indexMetaData).settings(settings).build();
}
/**
* A fake analysis service that returns the same keyword analyzer for all requests
*/
private static class FakeAnalysisService extends AnalysisService {
private Analyzer fakeAnalyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
throw new UnsupportedOperationException("shouldn't be here");
}
};
public FakeAnalysisService(IndexSettings indexSettings) {
super(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
}
@Override
public NamedAnalyzer analyzer(String name) {
return new NamedAnalyzer(name, fakeAnalyzer);
}
@Override
public void close() {
fakeAnalyzer.close();
super.close();
}
}
IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) {
final Settings settings = indexMetaData.getSettings();
final Settings upgrade = indexScopedSettings.archiveUnknownOrBrokenSettings(settings);

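The fake analyzer map above works because AbstractMap lets get() and entrySet() be
overridden independently. A standalone sketch of the same pattern, with the analyzer
type simplified to a plain String (class and value names here are made up):

import java.util.AbstractMap;
import java.util.Collections;
import java.util.Map;
import java.util.Set;

public class FakeMapSketch {
    public static void main(String[] args) {
        // Answers every lookup with a value derived from the key, while
        // exposing only a single entry for iteration (e.g. for closing).
        Map<String, String> fakeMap = new AbstractMap<String, String>() {
            @Override
            public String get(Object key) {
                return "fake_analyzer_for_" + key;
            }

            @Override
            public Set<Entry<String, String>> entrySet() {
                return Collections.singletonMap("fake_default", "fake_analyzer").entrySet();
            }
        };
        System.out.println(fakeMap.get("anything")); // fake_analyzer_for_anything
        System.out.println(fakeMap.entrySet());      // [fake_default=fake_analyzer]
    }
}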
View File

@@ -43,7 +43,7 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
@@ -97,7 +97,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
private final IndexEventListener eventListener;
private final AnalysisService analysisService;
private final IndexAnalyzers indexAnalyzers;
private final IndexFieldDataService indexFieldData;
private final BitsetFilterCache bitsetFilterCache;
private final NodeEnvironment nodeEnv;
@@ -137,9 +137,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
List<IndexingOperationListener> indexingOperationListeners) throws IOException {
super(indexSettings);
this.indexSettings = indexSettings;
this.analysisService = registry.build(indexSettings);
this.indexAnalyzers = registry.build(indexSettings);
this.similarityService = similarityService;
this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry,
this.mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry,
IndexService.this::newQueryShardContext);
this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache,
nodeServicesProvider.getCircuitBreakerService(), mapperService);
@@ -214,8 +214,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
return indexFieldData;
}
public AnalysisService analysisService() {
return this.analysisService;
public IndexAnalyzers getIndexAnalyzers() {
return this.indexAnalyzers;
}
public MapperService mapperService() {
@@ -239,7 +239,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
}
}
} finally {
IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask);
IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, indexAnalyzers, refreshTask, fsyncTask);
}
}
}

View File

@@ -18,14 +18,21 @@
*/
package org.elasticsearch.index.analysis;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
@@ -39,6 +46,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
@@ -46,7 +54,7 @@ import static java.util.Collections.unmodifiableMap;
/**
* An internal registry for tokenizer, token filter, char filter and analyzer.
* This class exists per node and allows to create per-index {@link AnalysisService} via {@link #build(IndexSettings)}
* This class exists per node and allows creating per-index {@link IndexAnalyzers} via {@link #build(IndexSettings)}
*/
public final class AnalysisRegistry implements Closeable {
public static final String INDEX_ANALYSIS_CHAR_FILTER = "index.analysis.char_filter";
@@ -136,17 +144,19 @@ public final class AnalysisRegistry implements Closeable {
}
/**
* Creates an index-level {@link AnalysisService} from this registry using the given index settings
* Creates an index-level {@link IndexAnalyzers} from this registry using the given index settings
*/
public AnalysisService build(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> charFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_CHAR_FILTER);
public IndexAnalyzers build(IndexSettings indexSettings) throws IOException {
final Map<String, CharFilterFactory> charFilterFactories = buildCharFilterFactories(indexSettings);
final Map<String, TokenizerFactory> tokenizerFactories = buildTokenizerFactories(indexSettings);
final Map<String, TokenFilterFactory> tokenFilterFactories = buildTokenFilterFactories(indexSettings);
final Map<String, AnalyzerProvider<?>> analyzerFactories = buildAnalyzerFactories(indexSettings);
return build(indexSettings, analyzerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
}
public Map<String, TokenFilterFactory> buildTokenFilterFactories(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> tokenFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_FILTER);
final Map<String, Settings> tokenizersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_TOKENIZER);
final Map<String, Settings> analyzersSettings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
final Map<String, CharFilterFactory> charFilterFactories = buildMapping(false, "charfilter", indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
final Map<String, TokenizerFactory> tokenizerFactories = buildMapping(false, "tokenizer", indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters = new HashMap<>(this.tokenFilters);
/*
* synonym is different than everything else since it needs access to the tokenizer factories for this index.
@@ -154,10 +164,22 @@
* hide internal data-structures as much as possible.
*/
tokenFilters.put("synonym", requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
final Map<String, TokenFilterFactory> tokenFilterFactories = buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
final Map<String, AnalyzerProvider<?>> analyzierFactories = buildMapping(true, "analyzer", indexSettings, analyzersSettings,
analyzers, prebuiltAnalysis.analyzerProviderFactories);
return new AnalysisService(indexSettings, analyzierFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
return buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
}
public Map<String, TokenizerFactory> buildTokenizerFactories(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> tokenizersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_TOKENIZER);
return buildMapping(false, "tokenizer", indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
}
public Map<String, CharFilterFactory> buildCharFilterFactories(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> charFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_CHAR_FILTER);
return buildMapping(false, "charfilter", indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
}
public Map<String, AnalyzerProvider<?>> buildAnalyzerFactories(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> analyzersSettings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
return buildMapping(true, "analyzer", indexSettings, analyzersSettings, analyzers, prebuiltAnalysis.analyzerProviderFactories);
}
/**
@@ -399,4 +421,132 @@
IOUtils.close(analyzerProviderFactories.values().stream().map((a) -> ((PreBuiltAnalyzerProviderFactory)a).analyzer()).collect(Collectors.toList()));
}
}
public IndexAnalyzers build(IndexSettings indexSettings,
Map<String, AnalyzerProvider<?>> analyzerProviders,
Map<String, TokenizerFactory> tokenizerFactoryFactories,
Map<String, CharFilterFactory> charFilterFactoryFactories,
Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
Index index = indexSettings.getIndex();
analyzerProviders = new HashMap<>(analyzerProviders);
Logger logger = Loggers.getLogger(getClass(), indexSettings.getSettings());
DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();
Map<String, NamedAnalyzer> analyzers = new HashMap<>();
for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
processAnalyzerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), analyzerAliases, analyzers,
tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
}
for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {
String key = entry.getKey();
if (analyzers.containsKey(key) &&
("default".equals(key) || "default_search".equals(key) || "default_search_quoted".equals(key)) == false) {
throw new IllegalStateException("already registered analyzer with name: " + key);
} else {
NamedAnalyzer configured = entry.getValue();
analyzers.put(key, configured);
}
}
if (!analyzers.containsKey("default")) {
processAnalyzerFactory(deprecationLogger, indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS),
analyzerAliases, analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
}
if (!analyzers.containsKey("default_search")) {
analyzers.put("default_search", analyzers.get("default"));
}
if (!analyzers.containsKey("default_search_quoted")) {
analyzers.put("default_search_quoted", analyzers.get("default_search"));
}
NamedAnalyzer defaultAnalyzer = analyzers.get("default");
if (defaultAnalyzer == null) {
throw new IllegalArgumentException("no default analyzer configured");
}
if (analyzers.containsKey("default_index")) {
final Version createdVersion = indexSettings.getIndexVersionCreated();
if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]");
} else {
deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index.getName());
}
}
NamedAnalyzer defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer;
NamedAnalyzer defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer;
NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;
for (Map.Entry<String, NamedAnalyzer> analyzer : analyzers.entrySet()) {
if (analyzer.getKey().startsWith("_")) {
throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\"");
}
}
return new IndexAnalyzers(indexSettings, defaultIndexAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer,
unmodifiableMap(analyzers));
}
private void processAnalyzerFactory(DeprecationLogger deprecationLogger,
IndexSettings indexSettings,
String name,
AnalyzerProvider<?> analyzerFactory,
Map<String, NamedAnalyzer> analyzerAliases,
Map<String, NamedAnalyzer> analyzers, Map<String, TokenFilterFactory> tokenFilters,
Map<String, CharFilterFactory> charFilters, Map<String, TokenizerFactory> tokenizers) {
/*
* Lucene defaults positionIncrementGap to 0 in all analyzers but
* Elasticsearch defaults them to 0 only before version 2.0
* and 100 afterwards so we override the positionIncrementGap if it
* doesn't match here.
*/
int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
if (analyzerFactory instanceof CustomAnalyzerProvider) {
((CustomAnalyzerProvider) analyzerFactory).build(tokenizers, charFilters, tokenFilters);
/*
* Custom analyzers already default to the correct, version
* dependent positionIncrementGap and the user is able to
* configure the positionIncrementGap directly on the analyzer so
* we disable overriding the positionIncrementGap to preserve the
* user's setting.
*/
overridePositionIncrementGap = Integer.MIN_VALUE;
}
Analyzer analyzerF = analyzerFactory.get();
if (analyzerF == null) {
throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer");
}
NamedAnalyzer analyzer;
if (analyzerF instanceof NamedAnalyzer) {
// if we got a named analyzer back, use it...
analyzer = (NamedAnalyzer) analyzerF;
if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {
// unless the positionIncrementGap needs to be overridden
analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
}
} else {
analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
}
if (analyzers.containsKey(name)) {
throw new IllegalStateException("already registered analyzer with name: " + name);
}
analyzers.put(name, analyzer);
// TODO: remove alias support completely when we no longer support pre 5.0 indices
final String analyzerAliasKey = "index.analysis.analyzer." + analyzerFactory.name() + ".alias";
if (indexSettings.getSettings().get(analyzerAliasKey) != null) {
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha6)) {
// do not allow alias creation if the index was created on or after v5.0 alpha6
throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported");
}
// the setting is now removed but we only support it for loading indices created before v5.0
deprecationLogger.deprecated("setting [{}] is only allowed on index [{}] because it was created before 5.x; " +
"analyzer aliases can no longer be created on new indices.", analyzerAliasKey, indexSettings.getIndex().getName());
Set<String> aliases = Sets.newHashSet(indexSettings.getSettings().getAsArray(analyzerAliasKey));
for (String alias : aliases) {
if (analyzerAliases.putIfAbsent(alias, analyzer) != null) {
throw new IllegalStateException("alias [" + alias + "] is already used by [" + analyzerAliases.get(alias).name() + "]");
}
}
}
}
}

View File

@@ -1,218 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.TextFieldMapper;
import java.io.Closeable;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import static java.util.Collections.unmodifiableMap;
/**
*
*/
public class AnalysisService extends AbstractIndexComponent implements Closeable {
private final Map<String, NamedAnalyzer> analyzers;
private final Map<String, TokenizerFactory> tokenizers;
private final Map<String, CharFilterFactory> charFilters;
private final Map<String, TokenFilterFactory> tokenFilters;
private final NamedAnalyzer defaultIndexAnalyzer;
private final NamedAnalyzer defaultSearchAnalyzer;
private final NamedAnalyzer defaultSearchQuoteAnalyzer;
public AnalysisService(IndexSettings indexSettings,
Map<String, AnalyzerProvider<?>> analyzerProviders,
Map<String, TokenizerFactory> tokenizerFactoryFactories,
Map<String, CharFilterFactory> charFilterFactoryFactories,
Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
super(indexSettings);
this.tokenizers = unmodifiableMap(tokenizerFactoryFactories);
this.charFilters = unmodifiableMap(charFilterFactoryFactories);
this.tokenFilters = unmodifiableMap(tokenFilterFactoryFactories);
analyzerProviders = new HashMap<>(analyzerProviders);
Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();
Map<String, NamedAnalyzer> analyzers = new HashMap<>();
for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
processAnalyzerFactory(entry.getKey(), entry.getValue(), analyzerAliases, analyzers);
}
for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {
String key = entry.getKey();
if (analyzers.containsKey(key) &&
("default".equals(key) || "default_search".equals(key) || "default_search_quoted".equals(key)) == false) {
throw new IllegalStateException("already registered analyzer with name: " + key);
} else {
NamedAnalyzer configured = entry.getValue();
analyzers.put(key, configured);
}
}
if (!analyzers.containsKey("default")) {
processAnalyzerFactory("default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS),
analyzerAliases, analyzers);
}
if (!analyzers.containsKey("default_search")) {
analyzers.put("default_search", analyzers.get("default"));
}
if (!analyzers.containsKey("default_search_quoted")) {
analyzers.put("default_search_quoted", analyzers.get("default_search"));
}
NamedAnalyzer defaultAnalyzer = analyzers.get("default");
if (defaultAnalyzer == null) {
throw new IllegalArgumentException("no default analyzer configured");
}
if (analyzers.containsKey("default_index")) {
final Version createdVersion = indexSettings.getIndexVersionCreated();
if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index().getName() + "]");
} else {
deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index().getName());
}
}
defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer;
defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer;
defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;
for (Map.Entry<String, NamedAnalyzer> analyzer : analyzers.entrySet()) {
if (analyzer.getKey().startsWith("_")) {
throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\"");
}
}
this.analyzers = unmodifiableMap(analyzers);
}
private void processAnalyzerFactory(String name, AnalyzerProvider<?> analyzerFactory, Map<String, NamedAnalyzer> analyzerAliases, Map<String, NamedAnalyzer> analyzers) {
/*
* Lucene defaults positionIncrementGap to 0 in all analyzers but
* Elasticsearch defaults them to 0 only before version 2.0
* and 100 afterwards so we override the positionIncrementGap if it
* doesn't match here.
*/
int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
if (analyzerFactory instanceof CustomAnalyzerProvider) {
((CustomAnalyzerProvider) analyzerFactory).build(this);
/*
* Custom analyzers already default to the correct, version
* dependent positionIncrementGap and the user is able to
* configure the positionIncrementGap directly on the analyzer so
* we disable overriding the positionIncrementGap to preserve the
* user's setting.
*/
overridePositionIncrementGap = Integer.MIN_VALUE;
}
Analyzer analyzerF = analyzerFactory.get();
if (analyzerF == null) {
throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer");
}
NamedAnalyzer analyzer;
if (analyzerF instanceof NamedAnalyzer) {
// if we got a named analyzer back, use it...
analyzer = (NamedAnalyzer) analyzerF;
if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {
// unless the positionIncrementGap needs to be overridden
analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
}
} else {
analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
}
if (analyzers.containsKey(name)) {
throw new IllegalStateException("already registered analyzer with name: " + name);
}
analyzers.put(name, analyzer);
// TODO: remove alias support completely when we no longer support pre 5.0 indices
final String analyzerAliasKey = "index.analysis.analyzer." + analyzerFactory.name() + ".alias";
if (indexSettings.getSettings().get(analyzerAliasKey) != null) {
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha6)) {
// do not allow alias creation if the index was created on or after v5.0 alpha6
throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported");
}
// the setting is now removed but we only support it for loading indices created before v5.0
deprecationLogger.deprecated("setting [{}] is only allowed on index [{}] because it was created before 5.x; " +
"analyzer aliases can no longer be created on new indices.", analyzerAliasKey, index().getName());
Set<String> aliases = Sets.newHashSet(indexSettings.getSettings().getAsArray(analyzerAliasKey));
for (String alias : aliases) {
if (analyzerAliases.putIfAbsent(alias, analyzer) != null) {
throw new IllegalStateException("alias [" + alias + "] is already used by [" + analyzerAliases.get(alias).name() + "]");
}
}
}
}
@Override
public void close() {
for (NamedAnalyzer analyzer : analyzers.values()) {
if (analyzer.scope() == AnalyzerScope.INDEX) {
try {
analyzer.close();
} catch (NullPointerException e) {
// because analyzers are aliased, they might be closed several times
// an NPE is thrown in this case, so ignore....
// TODO: Analyzer's can no longer have aliases in indices created in 5.x and beyond,
// so we only allow the aliases for analyzers on indices created pre 5.x for backwards
// compatibility. Once pre 5.0 indices are no longer supported, this check should be removed.
} catch (Exception e) {
logger.debug("failed to close analyzer {}", analyzer);
}
}
}
}
public NamedAnalyzer analyzer(String name) {
return analyzers.get(name);
}
public NamedAnalyzer defaultIndexAnalyzer() {
return defaultIndexAnalyzer;
}
public NamedAnalyzer defaultSearchAnalyzer() {
return defaultSearchAnalyzer;
}
public NamedAnalyzer defaultSearchQuoteAnalyzer() {
return defaultSearchQuoteAnalyzer;
}
public TokenizerFactory tokenizer(String name) {
return tokenizers.get(name);
}
public CharFilterFactory charFilter(String name) {
return charFilters.get(name);
}
public TokenFilterFactory tokenFilter(String name) {
return tokenFilters.get(name);
}
}

View File

@@ -26,6 +26,7 @@ import org.elasticsearch.index.mapper.TextFieldMapper;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* A custom analyzer that is built out of a single {@link org.apache.lucene.analysis.Tokenizer} and a list
@@ -43,35 +44,36 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
this.analyzerSettings = settings;
}
public void build(AnalysisService analysisService) {
public void build(final Map<String, TokenizerFactory> tokenizers, final Map<String, CharFilterFactory> charFilters,
final Map<String, TokenFilterFactory> tokenFilters) {
String tokenizerName = analyzerSettings.get("tokenizer");
if (tokenizerName == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] must be configured with a tokenizer");
}
TokenizerFactory tokenizer = analysisService.tokenizer(tokenizerName);
TokenizerFactory tokenizer = tokenizers.get(tokenizerName);
if (tokenizer == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find tokenizer under name [" + tokenizerName + "]");
}
List<CharFilterFactory> charFilters = new ArrayList<>();
List<CharFilterFactory> charFiltersList = new ArrayList<>();
String[] charFilterNames = analyzerSettings.getAsArray("char_filter");
for (String charFilterName : charFilterNames) {
CharFilterFactory charFilter = analysisService.charFilter(charFilterName);
CharFilterFactory charFilter = charFilters.get(charFilterName);
if (charFilter == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find char_filter under name [" + charFilterName + "]");
}
charFilters.add(charFilter);
charFiltersList.add(charFilter);
}
List<TokenFilterFactory> tokenFilters = new ArrayList<>();
List<TokenFilterFactory> tokenFilterList = new ArrayList<>();
String[] tokenFilterNames = analyzerSettings.getAsArray("filter");
for (String tokenFilterName : tokenFilterNames) {
TokenFilterFactory tokenFilter = analysisService.tokenFilter(tokenFilterName);
TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName);
if (tokenFilter == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
}
tokenFilters.add(tokenFilter);
tokenFilterList.add(tokenFilter);
}
int positionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
@@ -93,8 +95,8 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
this.customAnalyzer = new CustomAnalyzer(tokenizer,
charFilters.toArray(new CharFilterFactory[charFilters.size()]),
tokenFilters.toArray(new TokenFilterFactory[tokenFilters.size()]),
charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]),
tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()]),
positionIncrementGap,
offsetGap
);

View File

@@ -0,0 +1,96 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
/**
* IndexAnalyzers contains a name to analyzer mapping for a specific index.
* This class only holds analyzers that are explicitly configured for an index and doesn't allow
* access to individual tokenizers, char filters, or token filters.
*
* @see AnalysisRegistry
*/
public final class IndexAnalyzers extends AbstractIndexComponent implements Closeable {
private final NamedAnalyzer defaultIndexAnalyzer;
private final NamedAnalyzer defaultSearchAnalyzer;
private final NamedAnalyzer defaultSearchQuoteAnalyzer;
private final Map<String, NamedAnalyzer> analyzers;
private final IndexSettings indexSettings;
public IndexAnalyzers(IndexSettings indexSettings, NamedAnalyzer defaultIndexAnalyzer, NamedAnalyzer defaultSearchAnalyzer,
NamedAnalyzer defaultSearchQuoteAnalyzer, Map<String, NamedAnalyzer> analyzers) {
super(indexSettings);
this.defaultIndexAnalyzer = defaultIndexAnalyzer;
this.defaultSearchAnalyzer = defaultSearchAnalyzer;
this.defaultSearchQuoteAnalyzer = defaultSearchQuoteAnalyzer;
this.analyzers = analyzers;
this.indexSettings = indexSettings;
}
/**
* Returns an analyzer mapped to the given name or <code>null</code> if not present
*/
public NamedAnalyzer get(String name) {
return analyzers.get(name);
}
/**
* Returns the default index analyzer for this index
*/
public NamedAnalyzer getDefaultIndexAnalyzer() {
return defaultIndexAnalyzer;
}
/**
* Returns the default search analyzer for this index
*/
public NamedAnalyzer getDefaultSearchAnalyzer() {
return defaultSearchAnalyzer;
}
/**
* Returns the default search quote analyzer for this index
*/
public NamedAnalyzer getDefaultSearchQuoteAnalyzer() {
return defaultSearchQuoteAnalyzer;
}
@Override
public void close() throws IOException {
IOUtils.close(() -> analyzers.values().stream()
.filter(a -> a.scope() == AnalyzerScope.INDEX)
.iterator());
}
/**
* Returns the index settings
*/
public IndexSettings getIndexSettings() {
return indexSettings;
}
}
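
A hypothetical usage sketch of the class above; analysisRegistry and indexSettings are
assumed to come from the surrounding node/index context, as in IndexService:

// Build once per index; IndexAnalyzers is Closeable, so try-with-resources works.
try (IndexAnalyzers analyzers = analysisRegistry.build(indexSettings)) {
    NamedAnalyzer whitespace = analyzers.get("whitespace");        // null if not configured for this index
    NamedAnalyzer fallback = analyzers.getDefaultSearchAnalyzer(); // defaults are always present
    // ... use the analyzers while the index is open ...
} // close() releases only INDEX-scoped analyzers; global pre-built ones stay shared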

View File

@@ -106,9 +106,9 @@ public class AllFieldMapper extends MetadataFieldMapper {
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException {
Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
builder.fieldType().setIndexAnalyzer(parserContext.analysisService().defaultIndexAnalyzer());
builder.fieldType().setSearchAnalyzer(parserContext.analysisService().defaultSearchAnalyzer());
builder.fieldType().setSearchQuoteAnalyzer(parserContext.analysisService().defaultSearchQuoteAnalyzer());
builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
// parseField below will happily parse the doc_values setting, but it is then never passed to
// the AllFieldMapper ctor in the builder since it is not valid. Here we validate

View File

@@ -153,7 +153,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
if (searchAnalyzer != null) {
throw new MapperParsingException("analyzer on completion field [" + name + "] must be set when search_analyzer is set");
}
indexAnalyzer = searchAnalyzer = parserContext.analysisService().analyzer("simple");
indexAnalyzer = searchAnalyzer = parserContext.getIndexAnalyzers().get("simple");
} else if (searchAnalyzer == null) {
searchAnalyzer = indexAnalyzer;
}
@@ -164,7 +164,7 @@
}
private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name);
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(name);
if (analyzer == null) {
throw new IllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]");
}

View File

@@ -206,7 +206,7 @@ public class CompletionFieldMapper2x extends FieldMapper {
throw new MapperParsingException(
"analyzer on completion field [" + name + "] must be set when search_analyzer is set");
}
indexAnalyzer = searchAnalyzer = parserContext.analysisService().analyzer("simple");
indexAnalyzer = searchAnalyzer = parserContext.getIndexAnalyzers().get("simple");
} else if (searchAnalyzer == null) {
searchAnalyzer = indexAnalyzer;
}
@@ -217,7 +217,7 @@
}
private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name);
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(name);
if (analyzer == null) {
throw new IllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]");
}

View File

@@ -32,7 +32,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser;
import org.elasticsearch.search.internal.SearchContext;
@@ -147,11 +147,11 @@ public class DocumentMapper implements ToXContent {
}
MapperUtils.collect(this.mapping.root, newObjectMappers, newFieldMappers);
final AnalysisService analysisService = mapperService.analysisService();
final IndexAnalyzers indexAnalyzers = mapperService.getIndexAnalyzers();
this.fieldMappers = new DocumentFieldMappers(newFieldMappers,
analysisService.defaultIndexAnalyzer(),
analysisService.defaultSearchAnalyzer(),
analysisService.defaultSearchQuoteAnalyzer());
indexAnalyzers.getDefaultIndexAnalyzer(),
indexAnalyzers.getDefaultSearchAnalyzer(),
indexAnalyzers.getDefaultSearchQuoteAnalyzer());
Map<String, ObjectMapper> builder = new HashMap<>();
for (ObjectMapper objectMapper : newObjectMappers) {

View File

@@ -22,14 +22,13 @@ package org.elasticsearch.index.mapper;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
@@ -44,7 +43,7 @@ import static java.util.Collections.unmodifiableMap;
public class DocumentMapperParser {
final MapperService mapperService;
final AnalysisService analysisService;
final IndexAnalyzers indexAnalyzers;
private final SimilarityService similarityService;
private final Supplier<QueryShardContext> queryShardContextSupplier;
@@ -56,12 +55,12 @@ public class DocumentMapperParser {
private final Map<String, Mapper.TypeParser> typeParsers;
private final Map<String, MetadataFieldMapper.TypeParser> rootTypeParsers;
public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, AnalysisService analysisService,
public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, IndexAnalyzers indexAnalyzers,
SimilarityService similarityService, MapperRegistry mapperRegistry,
Supplier<QueryShardContext> queryShardContextSupplier) {
this.parseFieldMatcher = new ParseFieldMatcher(indexSettings.getSettings());
this.mapperService = mapperService;
this.analysisService = analysisService;
this.indexAnalyzers = indexAnalyzers;
this.similarityService = similarityService;
this.queryShardContextSupplier = queryShardContextSupplier;
this.typeParsers = mapperRegistry.getMapperParsers();
@@ -70,7 +69,7 @@ }
}
public Mapper.TypeParser.ParserContext parserContext(String type) {
return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get());
return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get());
}
public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException {

View File

@@ -97,7 +97,7 @@ public class LegacyTokenCountFieldMapper extends LegacyIntegerFieldMapper {
builder.nullValue(nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}

View File

@@ -24,7 +24,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityProvider;
@ -85,7 +85,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
private final String type;
private final AnalysisService analysisService;
private final IndexAnalyzers indexAnalyzers;
private final Function<String, SimilarityProvider> similarityLookupService;
@@ -99,11 +99,11 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
private final QueryShardContext queryShardContext;
public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService,
public ParserContext(String type, IndexAnalyzers indexAnalyzers, Function<String, SimilarityProvider> similarityLookupService,
MapperService mapperService, Function<String, TypeParser> typeParsers,
Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) {
this.type = type;
this.analysisService = analysisService;
this.indexAnalyzers = indexAnalyzers;
this.similarityLookupService = similarityLookupService;
this.mapperService = mapperService;
this.typeParsers = typeParsers;
@@ -116,8 +116,8 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
return type;
}
public AnalysisService analysisService() {
return analysisService;
public IndexAnalyzers getIndexAnalyzers() {
return indexAnalyzers;
}
public SimilarityProvider getSimilarity(String name) {
@@ -159,7 +159,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
static class MultiFieldParserContext extends ParserContext {
MultiFieldParserContext(ParserContext in) {
super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext());
super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext());
}
}

View File

@@ -36,7 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;
@@ -100,7 +100,7 @@ public class MapperService extends AbstractIndexComponent {
@Deprecated
public static final String PERCOLATOR_LEGACY_TYPE_NAME = ".percolator";
private final AnalysisService analysisService;
private final IndexAnalyzers indexAnalyzers;
/**
* Will create types automatically if they do not exists in the mapping definition yet
@@ -127,16 +127,16 @@ public class MapperService extends AbstractIndexComponent {
final MapperRegistry mapperRegistry;
public MapperService(IndexSettings indexSettings, AnalysisService analysisService,
public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers,
SimilarityService similarityService, MapperRegistry mapperRegistry,
Supplier<QueryShardContext> queryShardContextSupplier) {
super(indexSettings);
this.analysisService = analysisService;
this.indexAnalyzers = indexAnalyzers;
this.fieldTypes = new FieldTypeLookup();
this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, mapperRegistry, queryShardContextSupplier);
this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
this.documentParser = new DocumentMapperParser(indexSettings, this, indexAnalyzers, similarityService, mapperRegistry, queryShardContextSupplier);
this.indexAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultIndexAnalyzer(), p -> p.indexAnalyzer());
this.searchAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultSearchAnalyzer(), p -> p.searchAnalyzer());
this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
this.mapperRegistry = mapperRegistry;
this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING);
@ -171,8 +171,8 @@ public class MapperService extends AbstractIndexComponent {
};
}
public AnalysisService analysisService() {
return this.analysisService;
public IndexAnalyzers getIndexAnalyzers() {
return this.indexAnalyzers;
}
public DocumentMapperParser documentMapperParser() {
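The same mechanical substitution recurs through the rest of this diff; condensed once here for reference ("my_analyzer" is a hypothetical name, the accessors are the ones introduced above):

// before this commit, for comparison:
//   NamedAnalyzer named = mapperService.analysisService().analyzer("my_analyzer");
//   NamedAnalyzer def   = mapperService.analysisService().defaultIndexAnalyzer();

// after this commit: a plain name-to-analyzer lookup plus getters for the three defaults
NamedAnalyzer named = mapperService.getIndexAnalyzers().get("my_analyzer");
NamedAnalyzer def = mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer();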

View File

@ -29,15 +29,11 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.all.AllEntries;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.analysis.AnalysisService;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
/**
*
*/
public abstract class ParseContext {
/** Fork of {@link org.apache.lucene.document.Document} with additional functionality. */
@ -242,11 +238,6 @@ public abstract class ParseContext {
return in.docMapper();
}
@Override
public AnalysisService analysisService() {
return in.analysisService();
}
@Override
public MapperService mapperService() {
return in.mapperService();
@ -385,11 +376,6 @@ public abstract class ParseContext {
return this.docMapper;
}
@Override
public AnalysisService analysisService() {
return docMapperParser.analysisService;
}
@Override
public MapperService mapperService() {
return docMapperParser.mapperService;
@ -525,8 +511,6 @@ public abstract class ParseContext {
public abstract DocumentMapper docMapper();
public abstract AnalysisService analysisService();
public abstract MapperService mapperService();
public abstract Field version();

View File

@ -318,13 +318,13 @@ public class StringFieldMapper extends FieldMapper {
// we need to update to actual analyzers if they are not set in this case...
// so we can inject the position increment gap...
if (builder.fieldType().indexAnalyzer() == null) {
builder.fieldType().setIndexAnalyzer(parserContext.analysisService().defaultIndexAnalyzer());
builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
}
if (builder.fieldType().searchAnalyzer() == null) {
builder.fieldType().setSearchAnalyzer(parserContext.analysisService().defaultSearchAnalyzer());
builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
}
if (builder.fieldType().searchQuoteAnalyzer() == null) {
builder.fieldType().setSearchQuoteAnalyzer(parserContext.analysisService().defaultSearchQuoteAnalyzer());
builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
}
iterator.remove();
} else if (propName.equals("ignore_above")) {

View File

@ -174,13 +174,13 @@ public class TextFieldMapper extends FieldMapper {
}
node.put("fielddata", fielddata);
}
return new StringFieldMapper.TypeParser().parse(fieldName, node, parserContext);
}
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(fieldName);
builder.fieldType().setIndexAnalyzer(parserContext.analysisService().defaultIndexAnalyzer());
builder.fieldType().setSearchAnalyzer(parserContext.analysisService().defaultSearchAnalyzer());
builder.fieldType().setSearchQuoteAnalyzer(parserContext.analysisService().defaultSearchQuoteAnalyzer());
builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
parseTextField(builder, fieldName, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();

View File

@ -89,7 +89,7 @@ public class TokenCountFieldMapper extends FieldMapper {
builder.nullValue(nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}

View File

@ -125,21 +125,21 @@ public class TypeParsers {
builder.storeTermVectorPayloads(nodeBooleanValue("store_term_vector_payloads", propNode, parserContext));
iterator.remove();
} else if (propName.equals("analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
indexAnalyzer = analyzer;
iterator.remove();
} else if (propName.equals("search_analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
searchAnalyzer = analyzer;
iterator.remove();
} else if (propName.equals("search_quote_analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}

View File

@ -383,7 +383,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
analyzerObj = context.getMapperService().searchAnalyzer();
}
} else {
analyzerObj = context.getMapperService().analysisService().analyzer(analyzer);
analyzerObj = context.getMapperService().getIndexAnalyzers().get(analyzer);
if (analyzerObj == null) {
throw new QueryShardException(context, "[common] analyzer [" + analyzer + "] not found");
}
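The query builders in the following hunks (match_phrase, match_phrase_prefix, match, more_like_this, multi_match, query_string, simple_query_string) all repeat the guard shown here; the pattern, condensed (NAME and analyzer are each builder's own fields):

NamedAnalyzer analyzerObj = context.getIndexAnalyzers().get(analyzer);
if (analyzerObj == null) {
    throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}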

View File

@ -164,7 +164,7 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder<MatchPhr
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
// validate context specific fields
if (analyzer != null && context.getAnalysisService().analyzer(analyzer) == null) {
if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}

View File

@ -140,7 +140,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder<MatchPhraseQue
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
// validate context specific fields
if (analyzer != null && context.getAnalysisService().analyzer(analyzer) == null) {
if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}

View File

@ -444,7 +444,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
// validate context specific fields
if (analyzer != null && context.getAnalysisService().analyzer(analyzer) == null) {
if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}

View File

@ -147,7 +147,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
*/
public static final class Item implements ToXContent, Writeable {
public static final Item[] EMPTY_ARRAY = new Item[0];
public interface Field {
ParseField INDEX = new ParseField("_index");
ParseField TYPE = new ParseField("_type");
@ -1021,7 +1021,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
}
// set analyzer
Analyzer analyzerObj = context.getAnalysisService().analyzer(analyzer);
Analyzer analyzerObj = context.getIndexAnalyzers().get(analyzer);
if (analyzerObj == null) {
analyzerObj = context.getMapperService().searchAnalyzer();
}

View File

@ -708,7 +708,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
protected Query doToQuery(QueryShardContext context) throws IOException {
MultiMatchQuery multiMatchQuery = new MultiMatchQuery(context);
if (analyzer != null) {
if (context.getAnalysisService().analyzer(analyzer) == null) {
if (context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}
multiMatchQuery.setAnalyzer(analyzer);

View File

@ -41,7 +41,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
@ -116,8 +116,8 @@ public class QueryShardContext extends QueryRewriteContext {
this.isFilter = false;
}
public AnalysisService getAnalysisService() {
return mapperService.analysisService();
public IndexAnalyzers getIndexAnalyzers() {
return mapperService.getIndexAnalyzers();
}
public Similarity getSearchSimilarity() {

View File

@ -868,14 +868,14 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
if (analyzer == null) {
qpSettings.defaultAnalyzer(context.getMapperService().searchAnalyzer());
} else {
NamedAnalyzer namedAnalyzer = context.getAnalysisService().analyzer(analyzer);
NamedAnalyzer namedAnalyzer = context.getIndexAnalyzers().get(analyzer);
if (namedAnalyzer == null) {
throw new QueryShardException(context, "[query_string] analyzer [" + analyzer + "] not found");
}
qpSettings.forceAnalyzer(namedAnalyzer);
}
if (quoteAnalyzer != null) {
NamedAnalyzer namedAnalyzer = context.getAnalysisService().analyzer(quoteAnalyzer);
NamedAnalyzer namedAnalyzer = context.getIndexAnalyzers().get(quoteAnalyzer);
if (namedAnalyzer == null) {
throw new QueryShardException(context, "[query_string] quote_analyzer [" + quoteAnalyzer + "] not found");
}

View File

@ -355,7 +355,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
if (analyzer == null) {
luceneAnalyzer = context.getMapperService().searchAnalyzer();
} else {
luceneAnalyzer = context.getAnalysisService().analyzer(analyzer);
luceneAnalyzer = context.getIndexAnalyzers().get(analyzer);
if (luceneAnalyzer == null) {
throw new QueryShardException(context, "[" + SimpleQueryStringBuilder.NAME + "] analyzer [" + analyzer
+ "] not found");

View File

@ -204,7 +204,7 @@ public class MatchQuery {
}
return context.getMapperService().searchAnalyzer();
} else {
Analyzer analyzer = context.getMapperService().analysisService().analyzer(this.analyzer);
Analyzer analyzer = context.getMapperService().getIndexAnalyzers().get(this.analyzer);
if (analyzer == null) {
throw new IllegalArgumentException("No analyzer found for [" + this.analyzer + "]");
}

View File

@ -214,12 +214,12 @@ public class TermVectorsService {
MapperService mapperService = indexShard.mapperService();
Analyzer analyzer;
if (perFieldAnalyzer != null && perFieldAnalyzer.containsKey(field)) {
analyzer = mapperService.analysisService().analyzer(perFieldAnalyzer.get(field).toString());
analyzer = mapperService.getIndexAnalyzers().get(perFieldAnalyzer.get(field).toString());
} else {
analyzer = mapperService.fullName(field).indexAnalyzer();
}
if (analyzer == null) {
analyzer = mapperService.analysisService().defaultIndexAnalyzer();
analyzer = mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer();
}
return analyzer;
}
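The selection logic in TermVectorsService is untouched; only the lookup target moves. Condensed, with the names from the hunk above, the fallback order is:

// 1) explicit per-field analyzer from the request,
// 2) the field's own index-time analyzer,
// 3) the index-wide default
Analyzer resolved;
if (perFieldAnalyzer != null && perFieldAnalyzer.containsKey(field)) {
    resolved = mapperService.getIndexAnalyzers().get(perFieldAnalyzer.get(field).toString());
} else {
    resolved = mapperService.fullName(field).indexAnalyzer();
}
if (resolved == null) {
    resolved = mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer();
}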

View File

@ -39,7 +39,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
@ -497,11 +496,6 @@ final class DefaultSearchContext extends SearchContext {
return indexService.mapperService();
}
@Override
public AnalysisService analysisService() {
return indexService.analysisService();
}
@Override
public SimilarityService similarityService() {
return indexService.similarityService();

View File

@ -27,7 +27,6 @@ import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
@ -259,11 +258,6 @@ public abstract class FilteredSearchContext extends SearchContext {
return in.mapperService();
}
@Override
public AnalysisService analysisService() {
return in.analysisService();
}
@Override
public SimilarityService similarityService() {
return in.similarityService();

View File

@ -33,7 +33,6 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.RefCounted;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
@ -237,8 +236,6 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas
public abstract MapperService mapperService();
public abstract AnalysisService analysisService();
public abstract SimilarityService similarityService();
public abstract ScriptService scriptService();
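With the abstract analysisService() hook gone from DefaultSearchContext, FilteredSearchContext, and SearchContext (the three hunks above), code holding a search context reaches analyzers through the mapper service instead; a short sketch, where "whitespace" is a hypothetical analyzer name:

// searchContext is any SearchContext; accessor names come from the hunks in this commit
IndexAnalyzers analyzers = searchContext.mapperService().getIndexAnalyzers();
NamedAnalyzer whitespace = analyzers.get("whitespace");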

View File

@ -319,7 +319,7 @@ public abstract class SuggestionBuilder<T extends SuggestionBuilder<T>> extends
suggestionContext.setAnalyzer(fieldType.searchAnalyzer());
}
} else {
Analyzer luceneAnalyzer = mapperService.analysisService().analyzer(analyzer);
Analyzer luceneAnalyzer = mapperService.getIndexAnalyzers().get(analyzer);
if (luceneAnalyzer == null) {
throw new IllegalArgumentException("analyzer [" + analyzer + "] doesn't exist");
}

View File

@ -395,13 +395,13 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator
generator.setField(this.field);
transferIfNotNull(this.size, generator::size);
if (this.preFilter != null) {
generator.preFilter(mapperService.analysisService().analyzer(this.preFilter));
generator.preFilter(mapperService.getIndexAnalyzers().get(this.preFilter));
if (generator.preFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.preFilter + "] doesn't exist");
}
}
if (this.postFilter != null) {
generator.postFilter(mapperService.analysisService().analyzer(this.postFilter));
generator.postFilter(mapperService.getIndexAnalyzers().get(this.postFilter));
if (generator.postFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.postFilter + "] doesn't exist");
}

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.admin.indices;
import org.apache.lucene.analysis.minhash.MinHashFilter;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
@ -29,7 +28,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.AllFieldMapper;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.test.ESTestCase;
@ -42,7 +41,7 @@ import static java.util.Collections.emptyList;
public class TransportAnalyzeActionTests extends ESTestCase {
private AnalysisService analysisService;
private IndexAnalyzers indexAnalyzers;
private AnalysisRegistry registry;
private Environment environment;
@ -71,10 +70,10 @@ public class TransportAnalyzeActionTests extends ESTestCase {
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
environment = new Environment(settings);
registry = new AnalysisModule(environment, emptyList()).getAnalysisRegistry();
analysisService = registry.build(idxSettings);
indexAnalyzers = registry.build(idxSettings);
}
public void testNoAnalysisService() throws IOException {
public void testNoIndexAnalyzers() throws IOException {
AnalyzeRequest request = new AnalyzeRequest();
request.analyzer("standard");
request.text("the quick brown fox");
@ -87,7 +86,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.addTokenFilter("lowercase");
request.addTokenFilter("word_delimiter");
request.text("the qu1ck brown fox");
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? analysisService : null, registry, environment);
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? indexAnalyzers : null, registry, environment);
tokens = analyze.getTokens();
assertEquals(6, tokens.size());
assertEquals("qu", tokens.get(1).getTerm());
@ -100,7 +99,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.addTokenFilter("lowercase");
request.addTokenFilter("word_delimiter");
request.text("<p>the qu1ck brown fox</p>");
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? analysisService : null, registry, environment);
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? indexAnalyzers : null, registry, environment);
tokens = analyze.getTokens();
assertEquals(6, tokens.size());
assertEquals("the", tokens.get(0).getTerm());
@ -143,26 +142,26 @@ public class TransportAnalyzeActionTests extends ESTestCase {
assertEquals("<ALPHANUM>", tokens.get(3).getType());
}
public void testWithAnalysisService() throws IOException {
public void testWithIndexAnalyzers() throws IOException {
AnalyzeRequest request = new AnalyzeRequest();
request.analyzer("standard");
request.text("the quick brown fox");
request.analyzer("custom_analyzer");
request.text("the qu1ck brown fox");
AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
assertEquals(4, tokens.size());
request.analyzer("whitespace");
request.text("the qu1ck brown fox-dog");
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(4, tokens.size());
request.analyzer("custom_analyzer");
request.text("the qu1ck brown fox-dog");
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(5, tokens.size());
@ -171,7 +170,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.addTokenFilter("lowercase");
request.addTokenFilter("wordDelimiter");
request.text("the qu1ck brown fox-dog");
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(5, tokens.size());
assertEquals("the", tokens.get(0).getTerm());
@ -184,14 +183,14 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.tokenizer("trigram");
request.addTokenFilter("synonym");
request.text("kimchy");
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(2, tokens.size());
assertEquals("sha", tokens.get(0).getTerm());
assertEquals("hay", tokens.get(1).getTerm());
}
public void testGetIndexAnalyserWithoutAnalysisService() throws IOException {
public void testGetIndexAnalyserWithoutIndexAnalyzers() throws IOException {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> TransportAnalyzeAction.analyze(
new AnalyzeRequest()
@ -208,7 +207,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
new AnalyzeRequest()
.analyzer("foobar")
.text("the qu1ck brown fox"),
AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment));
AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
if (notGlobal) {
assertEquals(e.getMessage(), "failed to find analyzer [foobar]");
} else {
@ -220,7 +219,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
new AnalyzeRequest()
.tokenizer("foobar")
.text("the qu1ck brown fox"),
AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment));
AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
if (notGlobal) {
assertEquals(e.getMessage(), "failed to find tokenizer under [foobar]");
} else {
@ -233,7 +232,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
.tokenizer("whitespace")
.addTokenFilter("foobar")
.text("the qu1ck brown fox"),
AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment));
AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
if (notGlobal) {
assertEquals(e.getMessage(), "failed to find token filter under [foobar]");
} else {
@ -247,7 +246,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
.addTokenFilter("lowercase")
.addCharFilter("foobar")
.text("the qu1ck brown fox"),
AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment));
AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
if (notGlobal) {
assertEquals(e.getMessage(), "failed to find char filter under [foobar]");
} else {
@ -260,7 +259,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.tokenizer("whitespace");
request.addTokenFilter("min_hash");
request.text("the quick brown fox");
AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
int default_hash_count = 1;
int default_bucket_size = 512;

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -30,11 +31,11 @@ import java.io.StringReader;
public class ASCIIFoldingTokenFilterFactoryTests extends ESTokenStreamTestCase {
public void testDefault() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_ascii_folding");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding");
String source = "Ansprüche";
String[] expected = new String[]{"Anspruche"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -43,12 +44,12 @@ public class ASCIIFoldingTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testPreserveOriginal() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
.put("index.analysis.filter.my_ascii_folding.preserve_original", true)
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_ascii_folding");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding");
String source = "Ansprüche";
String[] expected = new String[]{"Anspruche", "Ansprüche"};
Tokenizer tokenizer = new WhitespaceTokenizer();

View File

@ -49,12 +49,25 @@ import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class AnalysisServiceTests extends ESTestCase {
public class AnalysisRegistryTests extends ESTestCase {
private AnalysisRegistry registry;
private static AnalyzerProvider<?> analyzerProvider(final String name) {
return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDEX, new EnglishAnalyzer());
}
@Override
public void setUp() throws Exception {
super.setUp();
Settings settings = Settings
.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
registry = new AnalysisRegistry(new Environment(settings),
emptyMap(), emptyMap(), emptyMap(), emptyMap());
}
public void testDefaultAnalyzers() throws IOException {
Version version = VersionUtils.randomVersion(random());
Settings settings = Settings
@ -63,29 +76,30 @@ public class AnalysisServiceTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
AnalysisService analysisService = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
.build(idxSettings);
assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
.build(idxSettings);
assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
}
public void testOverrideDefaultAnalyzer() throws IOException {
Version version = VersionUtils.randomVersion(random());
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default", analyzerProvider("default")), emptyMap(), emptyMap(), emptyMap());
assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default", analyzerProvider("default")), emptyMap(), emptyMap(), emptyMap());
assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
}
public void testOverrideDefaultIndexAnalyzerIsUnsupported() {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha1, Version.CURRENT);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
AnalyzerProvider<?> defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer());
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
() -> registry.build(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default_index", defaultIndex), emptyMap(), emptyMap(), emptyMap()));
assertTrue(e.getMessage().contains("[index.analysis.analyzer.default_index] is not supported"));
}
@ -94,21 +108,21 @@ public class AnalysisServiceTests extends ESTestCase {
Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(),
VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1));
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default_index", analyzerProvider("default_index")), emptyMap(), emptyMap(), emptyMap());
assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
}
public void testOverrideDefaultSearchAnalyzer() {
Version version = VersionUtils.randomVersion(random());
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default_search", analyzerProvider("default_search")), emptyMap(), emptyMap(), emptyMap());
assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
}
public void testBackCompatOverrideDefaultIndexAndSearchAnalyzer() {
@ -118,11 +132,11 @@ public class AnalysisServiceTests extends ESTestCase {
Map<String, AnalyzerProvider<?>> analyzers = new HashMap<>();
analyzers.put("default_index", analyzerProvider("default_index"));
analyzers.put("default_search", analyzerProvider("default_search"));
AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
analyzers, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
}
public void testConfigureCamelCaseTokenFilter() throws IOException {
@ -137,10 +151,10 @@ public class AnalysisServiceTests extends ESTestCase {
.putArray("index.analysis.analyzer.custom_analyzer_1.filter", "lowercase", "word_delimiter").build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
AnalysisService analysisService = new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry()
IndexAnalyzers indexAnalyzers = new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry()
.build(idxSettings);
try (NamedAnalyzer custom_analyser = analysisService.analyzer("custom_analyzer")) {
try (NamedAnalyzer custom_analyser = indexAnalyzers.get("custom_analyzer")) {
assertNotNull(custom_analyser);
TokenStream tokenStream = custom_analyser.tokenStream("foo", "J2SE j2ee");
tokenStream.reset();
@ -154,7 +168,7 @@ public class AnalysisServiceTests extends ESTestCase {
assertEquals("j2ee", token.get(1));
}
try (NamedAnalyzer custom_analyser = analysisService.analyzer("custom_analyzer_1")) {
try (NamedAnalyzer custom_analyser = indexAnalyzers.get("custom_analyzer_1")) {
assertNotNull(custom_analyser);
TokenStream tokenStream = custom_analyser.tokenStream("foo", "J2SE j2ee");
tokenStream.reset();
@ -178,14 +192,14 @@ public class AnalysisServiceTests extends ESTestCase {
Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
AnalysisService analysisService = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
.build(idxSettings);
AnalysisService otherAnalysisSergice = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(),
IndexAnalyzers otherIndexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(),
emptyMap()).build(idxSettings);
final int numIters = randomIntBetween(5, 20);
for (int i = 0; i < numIters; i++) {
PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(random(), PreBuiltAnalyzers.values());
assertSame(analysisService.analyzer(preBuiltAnalyzers.name()), otherAnalysisSergice.analyzer(preBuiltAnalyzers.name()));
assertSame(indexAnalyzers.get(preBuiltAnalyzers.name()), otherIndexAnalyzers.get(preBuiltAnalyzers.name()));
}
}
@ -204,4 +218,15 @@ public class AnalysisServiceTests extends ESTestCase {
() -> new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap()).build(idxSettings));
assertThat(e.getMessage(), equalTo("analyzer [test_analyzer] must specify either an analyzer type, or a tokenizer"));
}
public void testCloseIndexAnalyzersMultipleTimes() throws IOException {
Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
.build(idxSettings);
indexAnalyzers.close();
indexAnalyzers.close();
}
}
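The double-close test above is the only lifecycle contract this commit states: a second close() must be a no-op. Under the sketch near the top of this diff that falls out of the closed flag; a hypothetical caller then looks like:

IndexAnalyzers analyzers = registry.build(idxSettings);
try {
    NamedAnalyzer standard = analyzers.get("standard"); // pre-built, shared analyzer
    // ... analyze something ...
} finally {
    analyzers.close(); // safe even if another owner already closed it
}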

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import java.io.IOException;
@ -34,21 +35,25 @@ import static java.util.Collections.emptyList;
public class AnalysisTestsHelper {
public static AnalysisService createAnalysisServiceFromClassPath(Path baseDir, String resource) throws IOException {
public static ESTestCase.TestAnalysis createTestAnalysisFromClassPath(Path baseDir, String resource) throws IOException {
Settings settings = Settings.builder()
.loadFromStream(resource, AnalysisTestsHelper.class.getResourceAsStream(resource))
.put(Environment.PATH_HOME_SETTING.getKey(), baseDir.toString())
.build();
return createAnalysisServiceFromSettings(settings);
return createTestAnalysisFromSettings(settings);
}
public static AnalysisService createAnalysisServiceFromSettings(
public static ESTestCase.TestAnalysis createTestAnalysisFromSettings(
Settings settings) throws IOException {
if (settings.get(IndexMetaData.SETTING_VERSION_CREATED) == null) {
settings = Settings.builder().put(settings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
}
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
return new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry().build(idxSettings);
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings);
AnalysisRegistry analysisRegistry = new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry();
return new ESTestCase.TestAnalysis(analysisRegistry.build(indexSettings),
analysisRegistry.buildTokenFilterFactories(indexSettings),
analysisRegistry.buildTokenizerFactories(indexSettings),
analysisRegistry.buildCharFilterFactories(indexSettings));
}
}
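For reference, the helper's new return type is a holder on ESTestCase whose shape can be read off the constructor call above and the analysis.tokenFilter.get(...) usages in the surrounding tests; public final fields are an assumption consistent with those usages:

public static final class TestAnalysis {
    public final IndexAnalyzers indexAnalyzers;
    public final Map<String, TokenFilterFactory> tokenFilter;
    public final Map<String, TokenizerFactory> tokenizer;
    public final Map<String, CharFilterFactory> charFilter;

    public TestAnalysis(IndexAnalyzers indexAnalyzers,
                        Map<String, TokenFilterFactory> tokenFilter,
                        Map<String, TokenizerFactory> tokenizer,
                        Map<String, CharFilterFactory> charFilter) {
        this.indexAnalyzers = indexAnalyzers;
        this.tokenFilter = tokenFilter;
        this.tokenizer = tokenizer;
        this.charFilter = charFilter;
    }
}

Call sites then read, for example, TokenFilterFactory filter = analysis.tokenFilter.get("my_filter"), as in the tests that follow.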

View File

@ -21,6 +21,7 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -30,8 +31,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
private static final String RESOURCE = "/org/elasticsearch/index/analysis/cjk_analysis.json";
public void testDefault() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_bigram");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_bigram");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
Tokenizer tokenizer = new StandardTokenizer();
@ -40,8 +41,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testNoFlags() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_no_flags");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_no_flags");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
Tokenizer tokenizer = new StandardTokenizer();
@ -50,8 +51,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testHanOnly() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_only");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_han_only");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"", "", "", "学生", "", "試験", "", "", "", "" };
Tokenizer tokenizer = new StandardTokenizer();
@ -60,8 +61,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testHanUnigramOnly() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_unigram_only");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_han_unigram_only");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"", "", "", "", "学生", "", "", "", "試験", "", "", "", "", "" };
Tokenizer tokenizer = new StandardTokenizer();

View File

@ -26,7 +26,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import static org.elasticsearch.test.ESTestCase.createAnalysisService;
import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
/**
*/
@ -41,8 +41,8 @@ public class CharFilterTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
AnalysisService analysisService = createAnalysisService(idxSettings, settings);
NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
@ -58,9 +58,9 @@ public class CharFilterTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
AnalysisService analysisService = createAnalysisService(idxSettings, settings);
NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
@ -80,8 +80,8 @@ public class CharFilterTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
AnalysisService analysisService = createAnalysisService(idxSettings, settings);
NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "faBBbBB aBbbbBf"), new String[]{"foo", "oof"});
}

View File

@ -61,9 +61,7 @@ public class CompoundAnalysisTests extends ESTestCase {
return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
}
}));
AnalysisService analysisService = analysisModule.getAnalysisRegistry().build(idxSettings);
TokenFilterFactory filterFactory = analysisService.tokenFilter("dict_dec");
TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec");
MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
}
@ -85,9 +83,8 @@ public class CompoundAnalysisTests extends ESTestCase {
return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
}
}));
AnalysisService analysisService = analysisModule.getAnalysisRegistry().build(idxSettings);
Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer();
AllEntries allEntries = new AllEntries();
allEntries.addText("field1", text, 1.0f);

View File

@ -36,8 +36,8 @@ public class HunspellTokenFilterFactoryTests extends ESTestCase {
.put("index.analysis.filter.en_US.locale", "en_US")
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("en_US");
assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
assertThat(hunspellTokenFilter.dedup(), is(true));
@ -50,8 +50,8 @@ public class HunspellTokenFilterFactoryTests extends ESTestCase {
.put("index.analysis.filter.en_US.locale", "en_US")
.build();
analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
tokenFilter = analysisService.tokenFilter("en_US");
analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
tokenFilter = analysis.tokenFilter.get("en_US");
assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
assertThat(hunspellTokenFilter.dedup(), is(false));

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.junit.Assert;
@ -35,8 +36,8 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
private static final String RESOURCE = "/org/elasticsearch/index/analysis/keep_analysis.json";
public void testLoadWithoutSettings() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep");
Assert.assertNull(tokenFilter);
}
@ -48,7 +49,7 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]")
.build();
try {
AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
Assert.fail("path and array are configured");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@ -64,7 +65,7 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
.build();
try {
// test that our non-existent setup is picked up
AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
fail("expected an exception due to non existent keep_words_path");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@ -76,7 +77,7 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
.build();
try {
// test our none existing setup is picked up
AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
fail("expected an exception indicating that you can't use [keep_words_path] with [keep_words] ");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@ -86,8 +87,8 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testCaseInsensitiveMapping() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_keep_filter");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keep_filter");
assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
String source = "hello small world";
String[] expected = new String[]{"hello", "world"};
@ -97,8 +98,8 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testCaseSensitiveMapping() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_case_sensitive_keep_filter");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_case_sensitive_keep_filter");
assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
String source = "Hello small world";
String[] expected = new String[]{"Hello"};

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -37,8 +38,8 @@ public class KeepTypesFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.keep_numbers.type", "keep_types")
.putArray("index.analysis.filter.keep_numbers.types", new String[] {"<NUM>", "<SOMETHINGELSE>"})
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep_numbers");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep_numbers");
assertThat(tokenFilter, instanceOf(KeepTypesFilterFactory.class));
String source = "Hello 123 world";
String[] expected = new String[]{"123"};

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -34,9 +35,9 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_default.type", "limit")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
{
TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_default");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_default");
String source = "the quick brown fox";
String[] expected = new String[] { "the" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -44,7 +45,7 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
{
TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit");
String source = "the quick brown fox";
String[] expected = new String[] { "the" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -61,8 +62,8 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_1.consume_all_tokens", true)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_1");
String source = "the quick brown fox";
String[] expected = new String[] { "the", "quick", "brown" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -76,8 +77,8 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_1.consume_all_tokens", false)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_1");
String source = "the quick brown fox";
String[] expected = new String[] { "the", "quick", "brown" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -92,8 +93,8 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_1.consume_all_tokens", true)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_1");
String source = "the quick brown fox";
String[] expected = new String[] { "the", "quick", "brown", "fox" };
Tokenizer tokenizer = new WhitespaceTokenizer();

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -36,8 +37,8 @@ public class MinHashFilterFactoryTests extends ESTokenStreamTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("min_hash");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("min_hash");
String source = "the quick brown fox";
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader(source));
@ -57,8 +58,8 @@ public class MinHashFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.test_min_hash.with_rotation", false)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("test_min_hash");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("test_min_hash");
String source = "sushi";
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader(source));

View File

@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import static org.elasticsearch.test.ESTestCase.createAnalysisService;
import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
import static org.hamcrest.Matchers.containsString;
public class PatternCaptureTokenFilterTests extends ESTokenStreamTestCase {
@ -40,17 +40,16 @@ public class PatternCaptureTokenFilterTests extends ESTokenStreamTestCase {
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
AnalysisService analysisService = createAnalysisService(idxSettings, settings);
NamedAnalyzer analyzer1 = analysisService.analyzer("single");
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("single");
assertTokenStreamContents(analyzer1.tokenStream("test", "foobarbaz"), new String[]{"foobarbaz","foobar","foo"});
NamedAnalyzer analyzer2 = analysisService.analyzer("multi");
NamedAnalyzer analyzer2 = indexAnalyzers.get("multi");
assertTokenStreamContents(analyzer2.tokenStream("test", "abc123def"), new String[]{"abc123def","abc","123","def"});
NamedAnalyzer analyzer3 = analysisService.analyzer("preserve");
NamedAnalyzer analyzer3 = indexAnalyzers.get("preserve");
assertTokenStreamContents(analyzer3.tokenStream("test", "foobarbaz"), new String[]{"foobar","foo"});
}
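Analyzer lookups change the same way: analysisService.analyzer(name) becomes indexAnalyzers.get(name), where IndexAnalyzers is the simple map-like class this commit introduces. A minimal sketch of that lookup; the null-handling comment is an inference from the map semantics, not something this diff shows:

    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
    IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
    NamedAnalyzer analyzer = indexAnalyzers.get("single");   // was: analysisService.analyzer("single")
    // Presumably plain map semantics: an unknown name yields null rather than throwing,
    // so defensive callers may want to null-check (an assumption, not shown in this diff).
    assertTokenStreamContents(analyzer.tokenStream("test", "foobarbaz"),
        new String[]{"foobarbaz", "foobar", "foo"});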

View File

@ -26,6 +26,7 @@ import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -38,8 +39,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
private static final String RESOURCE = "/org/elasticsearch/index/analysis/shingle_analysis.json";
public void testDefault() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle");
String source = "the quick brown fox";
String[] expected = new String[]{"the", "the quick", "quick", "quick brown", "brown", "brown fox", "fox"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -48,8 +49,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testInverseMapping() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle_inverse");
assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
String source = "the quick brown fox";
String[] expected = new String[]{"the_quick_brown", "quick_brown_fox"};
@ -59,8 +60,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testInverseMappingNoShingles() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle_inverse");
assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
String source = "the quick";
String[] expected = new String[]{"the", "quick"};
@ -70,8 +71,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testFillerToken() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_filler");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle_filler");
String source = "simon the sorcerer";
String[] expected = new String[]{"simon FILLER", "simon FILLER sorcerer", "FILLER sorcerer"};
Tokenizer tokenizer = new WhitespaceTokenizer();

View File

@ -26,6 +26,7 @@ import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.VersionUtils;
@ -53,13 +54,14 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_english");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_english");
assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
TokenStream create = tokenFilter.create(tokenizer);
NamedAnalyzer analyzer = analysisService.analyzer("my_english");
IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
NamedAnalyzer analyzer = indexAnalyzers.get("my_english");
assertThat(create, instanceOf(PorterStemFilter.class));
assertAnalyzesTo(analyzer, "consolingly", new String[]{"consolingli"});
}
@ -80,13 +82,14 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_porter2");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_porter2");
assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
TokenStream create = tokenFilter.create(tokenizer);
NamedAnalyzer analyzer = analysisService.analyzer("my_porter2");
IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
NamedAnalyzer analyzer = indexAnalyzers.get("my_porter2");
assertThat(create, instanceOf(SnowballFilter.class));
assertAnalyzesTo(analyzer, "possibly", new String[]{"possibl"});
}

View File

@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import static org.elasticsearch.test.ESTestCase.createAnalysisService;
import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
public class StopAnalyzerTests extends ESTokenStreamTestCase {
public void testDefaultsCompoundAnalysis() throws Exception {
@ -38,13 +38,12 @@ public class StopAnalyzerTests extends ESTokenStreamTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
AnalysisService analysisService = createAnalysisService(idxSettings, settings);
NamedAnalyzer analyzer1 = analysisService.analyzer("analyzer1");
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("analyzer1");
assertTokenStreamContents(analyzer1.tokenStream("test", "to be or not to be"), new String[0]);
NamedAnalyzer analyzer2 = analysisService.analyzer("analyzer2");
NamedAnalyzer analyzer2 = indexAnalyzers.get("analyzer2");
assertTokenStreamContents(analyzer2.tokenStream("test", "to be or not to be"), new String[0]);
}

View File

@ -28,6 +28,7 @@ import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings.Builder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -47,7 +48,7 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase {
builder.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString());
Settings settings = builder.build();
try {
AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("enable_position_increments is not supported anymore"));
@ -62,8 +63,8 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase {
// don't specify
}
builder.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString());
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(builder.build());
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_stop");
assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
@ -77,8 +78,8 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.my_stop.remove_trailing", false)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_stop");
assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo an"));

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@ -30,11 +31,11 @@ import java.io.StringReader;
public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase {
public void testDefault() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -43,13 +44,13 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testCatenateWords() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"PowerShot", "500", "42", "wifi", "wifi", "4000", "j", "2", "se", "ONeil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -58,13 +59,13 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testCatenateNumbers() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
.put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"Power", "Shot", "50042", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -73,14 +74,14 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testCatenateAll() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
.put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
.put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"PowerShot", "50042", "wifi", "wifi4000", "j2se", "ONeil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -89,12 +90,12 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testSplitOnCaseChange() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot";
String[] expected = new String[]{"PowerShot"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -103,12 +104,12 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testPreserveOriginal() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.preserve_original", "true")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"PowerShot", "Power", "Shot", "500-42", "500", "42", "wi-fi", "wi", "fi", "wi-fi-4000", "wi", "fi", "4000", "j2se", "j", "2", "se", "O'Neil's", "O", "Neil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -117,12 +118,12 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testStemEnglishPossessive() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil", "s"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -132,13 +133,13 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
/** Correct offset order when doing both parts and concatenation: PowerShot is a synonym of Power */
public void testPartsAndCatenate() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot";
String[] expected = new String[]{"Power", "PowerShot", "Shot" };
Tokenizer tokenizer = new WhitespaceTokenizer();

View File

@ -24,9 +24,10 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.junit.Assert;
@ -43,7 +44,7 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.build();
try {
AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
Assert.fail("[common_words] or [common_words_path] is set");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@ -58,9 +59,9 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
{
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_default");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -75,9 +76,9 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
{
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_default");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -94,8 +95,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_1");
String source = "the quick brown is a fox or noT";
String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "fox_or", "or", "or_noT", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -108,8 +109,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_2");
String source = "the quick brown is a fox or why noT";
String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "or", "why", "why_noT", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -121,8 +122,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.putArray("index.analysis.filter.common_grams_3.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_3");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -138,15 +139,17 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createHome())
.build();
{
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
.indexAnalyzers;
Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
}
{
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
.indexAnalyzers;
Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer_file").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
@ -161,8 +164,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.common_grams_1.ignore_case", true)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_1");
String source = "the quick brown is a fox or noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox_or", "or_noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -176,8 +179,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.common_grams_2.ignore_case", false)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_2");
String source = "the quick brown is a fox or why noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -190,8 +193,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.putArray("index.analysis.filter.common_grams_3.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_3");
String source = "the quick brown is a fox or why noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -204,8 +207,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.putArray("index.analysis.filter.common_grams_4.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_4");
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_4");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@ -221,15 +224,17 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createHome())
.build();
{
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
.indexAnalyzers;
Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
}
{
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
.indexAnalyzers;
Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer_file").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);

View File

@ -30,7 +30,7 @@ import org.elasticsearch.common.lucene.all.AllTokenStream;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.hamcrest.MatcherAssert;
@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.equalTo;
*/
public class SynonymsAnalysisTests extends ESTestCase {
protected final Logger logger = Loggers.getLogger(getClass());
private AnalysisService analysisService;
private IndexAnalyzers indexAnalyzers;
public void testSynonymsAnalysis() throws IOException {
InputStream synonyms = getClass().getResourceAsStream("synonyms.txt");
@ -64,7 +64,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
analysisService = createAnalysisService(idxSettings, settings);
indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!");
match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!");
@ -74,8 +74,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
}
private void match(String analyzerName, String source, String target) throws IOException {
Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer();
TokenStream stream = AllTokenStream.allTokenStream("_all", source, 1.0f, analyzer);
stream.reset();

View File

@ -37,7 +37,7 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
@ -95,9 +95,9 @@ public class CodecTests extends ESTestCase {
.build();
IndexSettings settings = IndexSettingsModule.newIndexSettings("_na", nodeSettings);
SimilarityService similarityService = new SimilarityService(settings, Collections.emptyMap());
AnalysisService analysisService = createAnalysisService(settings, nodeSettings);
IndexAnalyzers indexAnalyzers = createTestAnalysis(settings, nodeSettings).indexAnalyzers;
MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), Collections.emptyMap());
MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry, () -> null);
MapperService service = new MapperService(settings, indexAnalyzers, similarityService, mapperRegistry, () -> null);
return new CodecService(service, ESLoggerFactory.getLogger("test"));
}

View File

@ -26,6 +26,7 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.filter.RegexFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
@ -67,10 +68,13 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.Engine.Searcher;
import org.elasticsearch.index.mapper.ContentPath;
@ -2006,10 +2010,12 @@ public class InternalEngineTests extends ESTestCase {
RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test");
Index index = new Index(indexName, "_na_");
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", new StandardAnalyzer());
IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap());
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null);
MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, () -> null);
DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService);
this.docMapper = b.build(mapperService);
}
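For tests that only need a MapperService, the fixture above no longer builds analysis components at all; it hand-constructs an IndexAnalyzers around a single default analyzer. Gathered into one fragment, with the caveat that reading the three identical NamedAnalyzer arguments as the default index, search, and search-quote analyzers is an inference from the call shape, not something this diff states:

    NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", new StandardAnalyzer());
    // Presumed argument order: default index, default search, default search-quote analyzer,
    // followed by the name -> analyzer map for any additional named analyzers.
    IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer,
        defaultAnalyzer, defaultAnalyzer, Collections.emptyMap());
    MapperService mapperService = new MapperService(indexSettings, indexAnalyzers,
        similarityService, mapperRegistry, () -> null);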

View File

@ -25,7 +25,6 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
@ -63,7 +62,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
Collections.singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser()));
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject(ExternalMetadataMapper.CONTENT_TYPE)
@ -112,7 +111,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap());
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
@ -182,7 +181,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap());
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")

View File

@ -24,24 +24,11 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
@ -244,9 +231,9 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
Collections.singletonMap("_dummy", new DummyMetadataFieldMapper.TypeParser())
);
final MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService,
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
ParsedDocument parsedDocument = mapper.parse("index", "type", "id", new BytesArray("{}"));

View File

@ -18,30 +18,22 @@
*/
package org.elasticsearch.index.mapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.test.ESSingleNodeTestCase;
@ -111,10 +103,11 @@ public class ParentFieldMapperTests extends ESSingleNodeTestCase {
public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {
Index index = new Index("_index", "testUUID");
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);
AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap());
NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", new StandardAnalyzer());
IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer,
Collections.emptyMap());
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService,
MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService,
new IndicesModule(emptyList()).getMapperRegistry(), () -> null);
XContentBuilder mappingSource = jsonBuilder().startObject().startObject("some_type")
.startObject("properties")

View File

@ -41,9 +41,9 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.Analysis;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.PatternReplaceCharFilterFactory;
@ -79,11 +79,11 @@ import static org.hamcrest.Matchers.is;
*/
public class AnalysisModuleTests extends ModuleTestCase {
public AnalysisService getAnalysisService(Settings settings) throws IOException {
return getAnalysisService(getNewRegistry(settings), settings);
public IndexAnalyzers getIndexAnalyzers(Settings settings) throws IOException {
return getIndexAnalyzers(getNewRegistry(settings), settings);
}
public AnalysisService getAnalysisService(AnalysisRegistry registry, Settings settings) throws IOException {
public IndexAnalyzers getIndexAnalyzers(AnalysisRegistry registry, Settings settings) throws IOException {
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
return registry.build(idxSettings);
}
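With AnalysisService gone, AnalysisRegistry.build(IndexSettings) is the single entry point that turns per-index settings into an IndexAnalyzers, which is exactly what the helper above wraps. A sketch of calling it directly, assuming the getNewRegistry helper defined in this test class:

    Settings settings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .build();
    AnalysisRegistry registry = getNewRegistry(settings);
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
    // build(...) materializes the analyzers for this one index from its settings.
    IndexAnalyzers indexAnalyzers = registry.build(idxSettings);
    NamedAnalyzer standard = indexAnalyzers.get("standard");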
@ -136,9 +136,9 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
AnalysisService as = getAnalysisService(newRegistry, settings);
assertThat(as.analyzer("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
assertThat(as.analyzer("default_search").analyzer(), is(instanceOf(EnglishAnalyzer.class)));
IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings);
assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(EnglishAnalyzer.class)));
}
public void testAnalyzerAliasReferencesAlias() throws IOException {
@ -152,10 +152,11 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
AnalysisService as = getAnalysisService(newRegistry, settings);
assertThat(as.analyzer("default").analyzer(), is(instanceOf(GermanAnalyzer.class)));
IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings);
assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(GermanAnalyzer.class)));
// analyzer types are bound early before we resolve aliases
assertThat(as.analyzer("default_search").analyzer(), is(instanceOf(StandardAnalyzer.class)));
assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(StandardAnalyzer.class)));
}
public void testAnalyzerAliasDefault() throws IOException {
@ -167,9 +168,9 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
AnalysisService as = getAnalysisService(newRegistry, settings);
assertThat(as.analyzer("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
assertThat(as.analyzer("default_search").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings);
assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
}
public void testAnalyzerAliasMoreThanOnce() throws IOException {
@ -183,7 +184,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
IllegalStateException ise = expectThrows(IllegalStateException.class, () -> getAnalysisService(newRegistry, settings));
IllegalStateException ise = expectThrows(IllegalStateException.class, () -> getIndexAnalyzers(newRegistry, settings));
assertEquals("alias [default] is already used by [foobar]", ise.getMessage());
}
@ -196,7 +197,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisRegistry registry = getNewRegistry(settings);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> getAnalysisService(registry, settings));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> getIndexAnalyzers(registry, settings));
assertEquals("setting [index.analysis.analyzer.foobar.alias] is not supported", e.getMessage());
}
@ -208,7 +209,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings2);
AnalysisService analysisService2 = getAnalysisService(newRegistry, settings2);
IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings2);
// registry always has the current version
assertThat(newRegistry.getAnalyzer("default"), is(instanceOf(NamedAnalyzer.class)));
@ -217,20 +218,20 @@ public class AnalysisModuleTests extends ModuleTestCase {
assertEquals(Version.CURRENT.luceneVersion, defaultNamedAnalyzer.analyzer().getVersion());
// analysis service has the expected version
assertThat(analysisService2.analyzer("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
assertEquals(Version.V_2_0_0.luceneVersion, analysisService2.analyzer("standard").analyzer().getVersion());
assertEquals(Version.V_2_0_0.luceneVersion, analysisService2.analyzer("thai").analyzer().getVersion());
assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion());
assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("thai").analyzer().getVersion());
assertThat(analysisService2.analyzer("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class)));
assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), analysisService2.analyzer("custom7").analyzer().getVersion());
assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class)));
assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), indexAnalyzers.get("custom7").analyzer().getVersion());
}
private void assertTokenFilter(String name, Class<?> clazz) throws IOException {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
TokenFilterFactory tokenFilter = analysisService.tokenFilter(name);
TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
TokenFilterFactory tokenFilter = analysis.tokenFilter.get(name);
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
TokenStream stream = tokenFilter.create(tokenizer);
@ -238,8 +239,8 @@ public class AnalysisModuleTests extends ModuleTestCase {
}
private void testSimpleConfiguration(Settings settings) throws IOException {
AnalysisService analysisService = getAnalysisService(settings);
Analyzer analyzer = analysisService.analyzer("custom1").analyzer();
IndexAnalyzers indexAnalyzers = getIndexAnalyzers(settings);
Analyzer analyzer = indexAnalyzers.get("custom1").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom1 = (CustomAnalyzer) analyzer;
@ -249,23 +250,23 @@ public class AnalysisModuleTests extends ModuleTestCase {
StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0];
assertThat(stop1.stopWords().size(), equalTo(1));
analyzer = analysisService.analyzer("custom2").analyzer();
analyzer = indexAnalyzers.get("custom2").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
// verify position increment gap
analyzer = analysisService.analyzer("custom6").analyzer();
analyzer = indexAnalyzers.get("custom6").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom6 = (CustomAnalyzer) analyzer;
assertThat(custom6.getPositionIncrementGap("any_string"), equalTo(256));
// verify characters mapping
analyzer = analysisService.analyzer("custom5").analyzer();
analyzer = indexAnalyzers.get("custom5").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom5 = (CustomAnalyzer) analyzer;
assertThat(custom5.charFilters()[0], instanceOf(MappingCharFilterFactory.class));
// check custom pattern replace filter
analyzer = analysisService.analyzer("custom3").analyzer();
analyzer = indexAnalyzers.get("custom3").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
PatternReplaceCharFilterFactory patternReplaceCharFilterFactory = (PatternReplaceCharFilterFactory) custom3.charFilters()[0];
@ -273,7 +274,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
assertThat(patternReplaceCharFilterFactory.getReplacement(), equalTo("replacedSample $1"));
// check custom class name (my)
analyzer = analysisService.analyzer("custom4").analyzer();
analyzer = indexAnalyzers.get("custom4").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom4 = (CustomAnalyzer) analyzer;
assertThat(custom4.tokenFilters()[0], instanceOf(MyFilterTokenFilterFactory.class));
@ -333,7 +334,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, "1")
.build();
try {
getAnalysisService(settings);
getIndexAnalyzers(settings);
fail("This should fail with IllegalArgumentException because the analyzers name starts with _");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), either(equalTo("analyzer name must not start with '_'. got \"_invalid_name\""))
@ -350,7 +351,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
try {
getAnalysisService(settings);
getIndexAnalyzers(settings);
fail("This should fail with IllegalArgumentException because the analyzers alias starts with _");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("analyzer name must not start with '_'. got \"_invalid_name\""));
@ -365,7 +366,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
try {
getAnalysisService(settings);
getIndexAnalyzers(settings);
fail("Analyzer should fail if it has position_offset_gap");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("Option [position_offset_gap] in Custom Analyzer [custom] " +

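The hunks above show the migration pattern that repeats throughout this commit: lookups that used to go through AnalysisService now go through IndexAnalyzers (for named analyzers) or, in tests, through the new TestAnalysis holder (for tokenizers, token filters and char filters). A minimal before/after sketch, assuming the getIndexAnalyzers test helper used above:

    // before: AnalysisService eagerly built and cached every registered component
    AnalysisService analysisService = getAnalysisService(settings);
    Analyzer analyzer = analysisService.analyzer("custom1").analyzer();

    // after: IndexAnalyzers is a simple name-to-NamedAnalyzer mapping
    IndexAnalyzers indexAnalyzers = getIndexAnalyzers(settings);
    Analyzer analyzer = indexAnalyzers.get("custom1").analyzer();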
View File

@ -390,7 +390,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
if (analyzer != null) {
return analyzer;
} else {
return context.getAnalysisService().defaultIndexAnalyzer();
return context.getIndexAnalyzers().getDefaultIndexAnalyzer();
}
}
};

View File

@ -42,9 +42,9 @@ import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStre
public class IcuTokenizerFactoryTests extends ESTestCase {
public void testSimpleIcuTokenizer() throws IOException {
AnalysisService analysisService = createAnalysisService();
TestAnalysis analysis = createTestAnalysis();
TokenizerFactory tokenizerFactory = analysisService.tokenizer("icu_tokenizer");
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("icu_tokenizer");
ICUTokenizer tokenizer = (ICUTokenizer) tokenizerFactory.create();
Reader reader = new StringReader("向日葵, one-two");
@ -53,10 +53,10 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
}
public void testIcuCustomizeRuleFile() throws IOException {
AnalysisService analysisService = createAnalysisService();
TestAnalysis analysis = createTestAnalysis();
// test the tokenizer with single rule file
TokenizerFactory tokenizerFactory = analysisService.tokenizer("user_rule_tokenizer");
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("user_rule_tokenizer");
ICUTokenizer tokenizer = (ICUTokenizer) tokenizerFactory.create();
Reader reader = new StringReader
("One-two punch. Brang-, not brung-it. This one--not that one--is the right one, -ish.");
@ -68,10 +68,10 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
}
public void testMultipleIcuCustomizeRuleFiles() throws IOException {
AnalysisService analysisService = createAnalysisService();
TestAnalysis analysis = createTestAnalysis();
// test the tokenizer with two rule files
TokenizerFactory tokenizerFactory = analysisService.tokenizer("multi_rule_tokenizer");
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("multi_rule_tokenizer");
ICUTokenizer tokenizer = (ICUTokenizer) tokenizerFactory.create();
StringReader reader = new StringReader
("Some English. Немного русский. ข้อความภาษาไทยเล็ก ๆ น้อย ๆ More English.");
@ -84,7 +84,7 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
}
private static AnalysisService createAnalysisService() throws IOException {
private static TestAnalysis createTestAnalysis() throws IOException {
InputStream keywords = IcuTokenizerFactoryTests.class.getResourceAsStream("KeywordTokenizer.rbbi");
InputStream latin = IcuTokenizerFactoryTests.class.getResourceAsStream("Latin-dont-break-on-hyphens.rbbi");
@ -102,6 +102,6 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
.build();
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build();
return createAnalysisService(new Index("test", "_na_"), nodeSettings, settings, new AnalysisICUPlugin());
return createTestAnalysis(new Index("test", "_na_"), nodeSettings, settings, new AnalysisICUPlugin());
}
}

View File

@ -31,24 +31,24 @@ import static org.hamcrest.Matchers.instanceOf;
*/
public class SimpleIcuAnalysisTests extends ESTestCase {
public void testDefaultsIcuAnalysis() throws IOException {
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisICUPlugin());
TokenizerFactory tokenizerFactory = analysisService.tokenizer("icu_tokenizer");
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("icu_tokenizer");
assertThat(tokenizerFactory, instanceOf(IcuTokenizerFactory.class));
TokenFilterFactory filterFactory = analysisService.tokenFilter("icu_normalizer");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("icu_normalizer");
assertThat(filterFactory, instanceOf(IcuNormalizerTokenFilterFactory.class));
filterFactory = analysisService.tokenFilter("icu_folding");
filterFactory = analysis.tokenFilter.get("icu_folding");
assertThat(filterFactory, instanceOf(IcuFoldingTokenFilterFactory.class));
filterFactory = analysisService.tokenFilter("icu_collation");
filterFactory = analysis.tokenFilter.get("icu_collation");
assertThat(filterFactory, instanceOf(IcuCollationTokenFilterFactory.class));
filterFactory = analysisService.tokenFilter("icu_transform");
filterFactory = analysis.tokenFilter.get("icu_transform");
assertThat(filterFactory, instanceOf(IcuTransformTokenFilterFactory.class));
CharFilterFactory charFilterFactory = analysisService.charFilter("icu_normalizer");
CharFilterFactory charFilterFactory = analysis.charFilter.get("icu_normalizer");
assertThat(charFilterFactory, instanceOf(IcuNormalizerCharFilterFactory.class));
}
}

View File

@ -50,9 +50,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.language", "tr")
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I WİLL USE TURKİSH CASING", "ı will use turkish casıng");
}
@ -66,9 +66,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.decomposition", "canonical")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng");
}
@ -82,9 +82,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "secondary")
.put("index.analysis.filter.myCollator.decomposition", "no")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "TESTING", "testing");
}
@ -99,9 +99,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.alternate", "shifted")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo-bar", "foo bar");
}
@ -117,9 +117,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.alternate", "shifted")
.put("index.analysis.filter.myCollator.variableTop", " ")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo bar", "foobar");
// now assert that punctuation still matters: foo-bar < foo bar
assertCollation(filterFactory, "foo-bar", "foo bar", -1);
@ -135,9 +135,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.numeric", "true")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "foobar-9", "foobar-10", -1);
}
@ -152,9 +152,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.caseLevel", "true")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "résumé", "resume");
assertCollatesToSame(filterFactory, "Résumé", "Resume");
// now assert that case still matters: resume < Resume
@ -172,9 +172,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "tertiary")
.put("index.analysis.filter.myCollator.caseFirst", "upper")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "Resume", "resume", -1);
}
@ -200,9 +200,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.rules", tailoredRules)
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "Töne", "Toene");
}

View File

@ -37,8 +37,8 @@ public class SimpleIcuNormalizerCharFilterTests extends ESTestCase {
Settings settings = Settings.builder()
.put("index.analysis.char_filter.myNormalizerChar.type", "icu_normalizer")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
CharFilterFactory charFilterFactory = analysisService.charFilter("myNormalizerChar");
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
CharFilterFactory charFilterFactory = analysis.charFilter.get("myNormalizerChar");
String input = "ʰ㌰゙5℃№㈱㌘バッファーの正規化のテスト㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि";
Normalizer2 normalizer = Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE);
@ -61,8 +61,8 @@ public class SimpleIcuNormalizerCharFilterTests extends ESTestCase {
.put("index.analysis.char_filter.myNormalizerChar.name", "nfkc")
.put("index.analysis.char_filter.myNormalizerChar.mode", "decompose")
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
CharFilterFactory charFilterFactory = analysisService.charFilter("myNormalizerChar");
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
CharFilterFactory charFilterFactory = analysis.charFilter.get("myNormalizerChar");
String input = "ʰ㌰゙5℃№㈱㌘バッファーの正規化のテスト㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि";
Normalizer2 normalizer = Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.DECOMPOSE);

View File

@ -48,44 +48,45 @@ import static org.hamcrest.Matchers.notNullValue;
*/
public class KuromojiAnalysisTests extends ESTestCase {
public void testDefaultsKuromojiAnalysis() throws IOException {
AnalysisService analysisService = createAnalysisService();
TestAnalysis analysis = createTestAnalysis();
TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_tokenizer");
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_tokenizer");
assertThat(tokenizerFactory, instanceOf(KuromojiTokenizerFactory.class));
TokenFilterFactory filterFactory = analysisService.tokenFilter("kuromoji_part_of_speech");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("kuromoji_part_of_speech");
assertThat(filterFactory, instanceOf(KuromojiPartOfSpeechFilterFactory.class));
filterFactory = analysisService.tokenFilter("kuromoji_readingform");
filterFactory = analysis.tokenFilter.get("kuromoji_readingform");
assertThat(filterFactory, instanceOf(KuromojiReadingFormFilterFactory.class));
filterFactory = analysisService.tokenFilter("kuromoji_baseform");
filterFactory = analysis.tokenFilter.get("kuromoji_baseform");
assertThat(filterFactory, instanceOf(KuromojiBaseFormFilterFactory.class));
filterFactory = analysisService.tokenFilter("kuromoji_stemmer");
filterFactory = analysis.tokenFilter.get("kuromoji_stemmer");
assertThat(filterFactory, instanceOf(KuromojiKatakanaStemmerFactory.class));
filterFactory = analysisService.tokenFilter("ja_stop");
filterFactory = analysis.tokenFilter.get("ja_stop");
assertThat(filterFactory, instanceOf(JapaneseStopTokenFilterFactory.class));
filterFactory = analysisService.tokenFilter("kuromoji_number");
filterFactory = analysis.tokenFilter.get("kuromoji_number");
assertThat(filterFactory, instanceOf(KuromojiNumberFilterFactory.class));
NamedAnalyzer analyzer = analysisService.analyzer("kuromoji");
IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
NamedAnalyzer analyzer = indexAnalyzers.get("kuromoji");
assertThat(analyzer.analyzer(), instanceOf(JapaneseAnalyzer.class));
analyzer = analysisService.analyzer("my_analyzer");
analyzer = indexAnalyzers.get("my_analyzer");
assertThat(analyzer.analyzer(), instanceOf(CustomAnalyzer.class));
assertThat(analyzer.analyzer().tokenStream(null, new StringReader("")), instanceOf(JapaneseTokenizer.class));
CharFilterFactory charFilterFactory = analysisService.charFilter("kuromoji_iteration_mark");
CharFilterFactory charFilterFactory = analysis.charFilter.get("kuromoji_iteration_mark");
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
}
public void testBaseFormFilterFactory() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_pos");
TestAnalysis analysis = createTestAnalysis();
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_pos");
assertThat(tokenFilter, instanceOf(KuromojiPartOfSpeechFilterFactory.class));
String source = "私は制限スピードを超える。";
String[] expected = new String[]{"私", "は", "制限", "スピード", "を"};
@ -95,8 +96,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testReadingFormFilterFactory() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_rf");
TestAnalysis analysis = createTestAnalysis();
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_rf");
assertThat(tokenFilter, instanceOf(KuromojiReadingFormFilterFactory.class));
String source = "今夜はロバート先生と話した";
String[] expected_tokens_romaji = new String[]{"kon'ya", "ha", "robato", "sensei", "to", "hanashi", "ta"};
@ -109,14 +110,14 @@ public class KuromojiAnalysisTests extends ESTestCase {
tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH);
tokenizer.setReader(new StringReader(source));
String[] expected_tokens_katakana = new String[]{"コンヤ", "ハ", "ロバート", "センセイ", "ト", "ハナシ", "タ"};
tokenFilter = analysisService.tokenFilter("kuromoji_readingform");
tokenFilter = analysis.tokenFilter.get("kuromoji_readingform");
assertThat(tokenFilter, instanceOf(KuromojiReadingFormFilterFactory.class));
assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens_katakana);
}
public void testKatakanaStemFilter() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_stemmer");
TestAnalysis analysis = createTestAnalysis();
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_stemmer");
assertThat(tokenFilter, instanceOf(KuromojiKatakanaStemmerFactory.class));
String source = "明後日パーティーに行く予定がある。図書館で資料をコピーしました。";
@ -128,7 +129,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
String[] expected_tokens_katakana = new String[]{"明後日", "パーティ", "に", "行く", "予定", "が", "ある", "図書館", "で", "資料", "を", "コピー", "し", "まし", "た"};
assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens_katakana);
tokenFilter = analysisService.tokenFilter("kuromoji_ks");
tokenFilter = analysis.tokenFilter.get("kuromoji_ks");
assertThat(tokenFilter, instanceOf(KuromojiKatakanaStemmerFactory.class));
tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH);
tokenizer.setReader(new StringReader(source));
@ -140,9 +141,9 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testIterationMarkCharFilter() throws IOException {
AnalysisService analysisService = createAnalysisService();
TestAnalysis analysis = createTestAnalysis();
// test only kanji
CharFilterFactory charFilterFactory = analysisService.charFilter("kuromoji_im_only_kanji");
CharFilterFactory charFilterFactory = analysis.charFilter.get("kuromoji_im_only_kanji");
assertNotNull(charFilterFactory);
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
@ -153,7 +154,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
// test only kana
charFilterFactory = analysisService.charFilter("kuromoji_im_only_kana");
charFilterFactory = analysis.charFilter.get("kuromoji_im_only_kana");
assertNotNull(charFilterFactory);
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
@ -163,7 +164,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
// test default
charFilterFactory = analysisService.charFilter("kuromoji_im_default");
charFilterFactory = analysis.charFilter.get("kuromoji_im_default");
assertNotNull(charFilterFactory);
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
@ -173,8 +174,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testJapaneseStopFilterFactory() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenFilterFactory tokenFilter = analysisService.tokenFilter("ja_stop");
TestAnalysis analysis = createTestAnalysis();
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("ja_stop");
assertThat(tokenFilter, instanceOf(JapaneseStopTokenFilterFactory.class));
String source = "私は制限スピードを超える。";
String[] expected = new String[]{"私", "制限", "超える"};
@ -183,7 +184,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
assertSimpleTSOutput(tokenFilter.create(tokenizer), expected);
}
private static AnalysisService createAnalysisService() throws IOException {
private static TestAnalysis createTestAnalysis() throws IOException {
InputStream empty_dict = KuromojiAnalysisTests.class.getResourceAsStream("empty_user_dict.txt");
InputStream dict = KuromojiAnalysisTests.class.getResourceAsStream("user_dict.txt");
Path home = createTempDir();
@ -198,7 +199,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build();
return createAnalysisService(new Index("test", "_na_"), nodeSettings, settings, new AnalysisKuromojiPlugin());
return createTestAnalysis(new Index("test", "_na_"), nodeSettings, settings, new AnalysisKuromojiPlugin());
}
public static void assertSimpleTSOutput(TokenStream stream,
@ -230,8 +231,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testKuromojiUserDict() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_user_dict");
TestAnalysis analysis = createTestAnalysis();
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_user_dict");
String source = "私は制限スピードを超える。";
String[] expected = new String[]{"私", "は", "制限スピード", "を", "超える"};
@ -242,14 +243,14 @@ public class KuromojiAnalysisTests extends ESTestCase {
// fix #59
public void testKuromojiEmptyUserDict() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_empty_user_dict");
TestAnalysis analysis = createTestAnalysis();
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_empty_user_dict");
assertThat(tokenizerFactory, instanceOf(KuromojiTokenizerFactory.class));
}
public void testNbestCost() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_nbest_cost");
TestAnalysis analysis = createTestAnalysis();
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_nbest_cost");
String source = "鳩山積み";
String[] expected = new String[] {"鳩", "鳩山", "山積み", "積み"};
@ -259,8 +260,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testNbestExample() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_nbest_examples");
TestAnalysis analysis = createTestAnalysis();
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_nbest_examples");
String source = "鳩山積み";
String[] expected = new String[] {"鳩", "鳩山", "山積み", "積み"};
@ -270,8 +271,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testNbestBothOptions() throws IOException {
AnalysisService analysisService = createAnalysisService();
TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_nbest_both");
TestAnalysis analysis = createTestAnalysis();
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_nbest_both");
String source = "鳩山積み";
String[] expected = new String[] {"鳩", "鳩山", "山積み", "積み"};
@ -282,8 +283,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testNumberFilterFactory() throws Exception {
AnalysisService analysisService = createAnalysisService();
TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_number");
TestAnalysis analysis = createTestAnalysis();
TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_number");
assertThat(tokenFilter, instanceOf(KuromojiNumberFilterFactory.class));
String source = "本日十万二千五百円のワインを買った";
String[] expected = new String[]{"本日", "102500", "円", "の", "ワイン", "を", "買っ", "た"};

View File

@ -39,8 +39,8 @@ public class SimplePhoneticAnalysisTests extends ESTestCase {
Settings settings = Settings.builder().loadFromStream(yaml, getClass().getResourceAsStream(yaml))
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisPhoneticPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("phonetic");
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisPhoneticPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("phonetic");
MatcherAssert.assertThat(filterFactory, instanceOf(PhoneticTokenFilterFactory.class));
}
}

View File

@ -31,9 +31,9 @@ import static org.hamcrest.Matchers.instanceOf;
public class SimpleSmartChineseAnalysisTests extends ESTestCase {
public void testDefaultsIcuAnalysis() throws IOException {
final AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY,
final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY,
new AnalysisSmartChinesePlugin());
TokenizerFactory tokenizerFactory = analysisService.tokenizer("smartcn_tokenizer");
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("smartcn_tokenizer");
MatcherAssert.assertThat(tokenizerFactory, instanceOf(SmartChineseTokenizerTokenizerFactory.class));
}
}

View File

@ -36,12 +36,12 @@ import static org.hamcrest.Matchers.instanceOf;
*/
public class PolishAnalysisTests extends ESTestCase {
public void testDefaultsPolishAnalysis() throws IOException {
final AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY,
final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY,
new AnalysisStempelPlugin());
TokenFilterFactory tokenizerFactory = analysisService.tokenFilter("polish_stem");
TokenFilterFactory tokenizerFactory = analysis.tokenFilter.get("polish_stem");
MatcherAssert.assertThat(tokenizerFactory, instanceOf(PolishStemTokenFilterFactory.class));
Analyzer analyzer = analysisService.analyzer("polish").analyzer();
Analyzer analyzer = analysis.indexAnalyzers.get("polish").analyzer();
MatcherAssert.assertThat(analyzer, instanceOf(PolishAnalyzer.class));
}
}

View File

@ -49,9 +49,9 @@ public class SimplePolishTokenFilterTests extends ESTestCase {
Settings settings = Settings.builder()
.put("index.analysis.filter.myStemmer.type", "polish_stem")
.build();
AnalysisService analysisService = createAnalysisService(index, settings, new AnalysisStempelPlugin());
TestAnalysis analysis = createTestAnalysis(index, settings, new AnalysisStempelPlugin());
TokenFilterFactory filterFactory = analysisService.tokenFilter("myStemmer");
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myStemmer");
Tokenizer tokenizer = new KeywordTokenizer();
tokenizer.setReader(new StringReader(source));
@ -65,9 +65,9 @@ public class SimplePolishTokenFilterTests extends ESTestCase {
}
private void testAnalyzer(String source, String... expected_terms) throws IOException {
AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY, new AnalysisStempelPlugin());
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisStempelPlugin());
Analyzer analyzer = analysisService.analyzer("polish").analyzer();
Analyzer analyzer = analysis.indexAnalyzers.get("polish").analyzer();
TokenStream ts = analyzer.tokenStream("test", source);

View File

@ -59,7 +59,7 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {
Collections.singletonMap(Murmur3FieldMapper.CONTENT_TYPE, new Murmur3FieldMapper.TypeParser()),
Collections.emptyMap());
parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
}
@Override
@ -152,7 +152,7 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {
Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
IndexService indexService2x = createIndex("test_old", oldIndexSettings);
DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(), indexService2x.analysisService(),
DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(), indexService2x.getIndexAnalyzers(),
indexService2x.similarityService(), mapperRegistry, indexService2x::newQueryShardContext);
DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));

View File

@ -21,11 +21,9 @@ package org.elasticsearch.index;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
@ -36,7 +34,7 @@ import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
import static org.elasticsearch.test.ESTestCase.createAnalysisService;
import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
public class MapperTestUtils {
@ -56,10 +54,10 @@ public class MapperTestUtils {
Settings finalSettings = settingsBuilder.build();
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", finalSettings);
AnalysisService analysisService = createAnalysisService(indexSettings, finalSettings);
IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers;
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
return new MapperService(indexSettings,
analysisService,
indexAnalyzers,
similarityService,
mapperRegistry,
() -> null);

View File

@ -65,7 +65,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
@ -1044,11 +1044,11 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings();
idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings);
AnalysisModule analysisModule = new AnalysisModule(new Environment(nodeSettings), emptyList());
AnalysisService analysisService = analysisModule.getAnalysisRegistry().build(idxSettings);
IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
scriptService = scriptModule.getScriptService();
similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, this::createShardContext);
mapperService = new MapperService(idxSettings, indexAnalyzers, similarityService, mapperRegistry, this::createShardContext);
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
});
indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache,

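As the hunk above shows, AnalysisRegistry#build(IndexSettings) now returns the lightweight IndexAnalyzers directly, and consumers such as MapperService accept it in place of AnalysisService. A sketch of the new wiring, with names taken from the test setup above:

    AnalysisModule analysisModule = new AnalysisModule(new Environment(nodeSettings), emptyList());
    IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
    MapperService mapperService = new MapperService(idxSettings, indexAnalyzers, similarityService,
            mapperRegistry, this::createShardContext);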
View File

@ -55,7 +55,11 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.indices.IndicesModule;
@ -810,35 +814,37 @@ public abstract class ESTestCase extends LuceneTestCase {
}
/**
* Creates an AnalysisService with all the default analyzers configured.
* Creates a TestAnalysis with all the default analyzers configured.
*/
public static AnalysisService createAnalysisService(Index index, Settings settings, AnalysisPlugin... analysisPlugins)
public static TestAnalysis createTestAnalysis(Index index, Settings settings, AnalysisPlugin... analysisPlugins)
throws IOException {
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
return createAnalysisService(index, nodeSettings, settings, analysisPlugins);
return createTestAnalysis(index, nodeSettings, settings, analysisPlugins);
}
/**
* Creates an AnalysisService with all the default analyzers configured.
* Creates a TestAnalysis with all the default analyzers configured.
*/
public static AnalysisService createAnalysisService(Index index, Settings nodeSettings, Settings settings,
AnalysisPlugin... analysisPlugins) throws IOException {
public static TestAnalysis createTestAnalysis(Index index, Settings nodeSettings, Settings settings,
AnalysisPlugin... analysisPlugins) throws IOException {
Settings indexSettings = Settings.builder().put(settings)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
return createAnalysisService(IndexSettingsModule.newIndexSettings(index, indexSettings), nodeSettings, analysisPlugins);
return createTestAnalysis(IndexSettingsModule.newIndexSettings(index, indexSettings), nodeSettings, analysisPlugins);
}
/**
* Creates an AnalysisService with all the default analyzers configured.
* Creates a TestAnalysis with all the default analyzers configured.
*/
public static AnalysisService createAnalysisService(IndexSettings indexSettings, Settings nodeSettings,
AnalysisPlugin... analysisPlugins) throws IOException {
public static TestAnalysis createTestAnalysis(IndexSettings indexSettings, Settings nodeSettings,
AnalysisPlugin... analysisPlugins) throws IOException {
Environment env = new Environment(nodeSettings);
AnalysisModule analysisModule = new AnalysisModule(env, Arrays.asList(analysisPlugins));
final AnalysisService analysisService = analysisModule.getAnalysisRegistry()
.build(indexSettings);
return analysisService;
AnalysisRegistry analysisRegistry = analysisModule.getAnalysisRegistry();
return new TestAnalysis(analysisRegistry.build(indexSettings),
analysisRegistry.buildTokenFilterFactories(indexSettings),
analysisRegistry.buildTokenizerFactories(indexSettings),
analysisRegistry.buildCharFilterFactories(indexSettings));
}
public static ScriptModule newTestScriptModule() {
@ -868,4 +874,27 @@ public abstract class ESTestCase extends LuceneTestCase {
}
));
}
/**
* This cute helper class just holds all analysis building blocks that are used
* to build IndexAnalyzers. This is only for testing since in production we only need the
* result and we don't even expose it there.
*/
public static final class TestAnalysis {
public final IndexAnalyzers indexAnalyzers;
public final Map<String, TokenFilterFactory> tokenFilter;
public final Map<String, TokenizerFactory> tokenizer;
public final Map<String, CharFilterFactory> charFilter;
public TestAnalysis(IndexAnalyzers indexAnalyzers,
Map<String, TokenFilterFactory> tokenFilter,
Map<String, TokenizerFactory> tokenizer,
Map<String, CharFilterFactory> charFilter) {
this.indexAnalyzers = indexAnalyzers;
this.tokenFilter = tokenFilter;
this.tokenizer = tokenizer;
this.charFilter = charFilter;
}
}
}

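With TestAnalysis in place, a plugin test can fetch any analysis building block by name without ever constructing an AnalysisService. A usage sketch, following the ICU tests earlier in this commit:

    TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
    TokenizerFactory tokenizerFactory = analysis.tokenizer.get("icu_tokenizer");
    TokenFilterFactory filterFactory = analysis.tokenFilter.get("icu_normalizer");
    CharFilterFactory charFilterFactory = analysis.charFilter.get("icu_normalizer");
    NamedAnalyzer defaultAnalyzer = analysis.indexAnalyzers.getDefaultIndexAnalyzer();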
View File

@ -27,7 +27,6 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
@ -295,9 +294,6 @@ public class TestSearchContext extends SearchContext {
return null;
}
@Override
public AnalysisService analysisService() { return indexService.analysisService();}
@Override
public SimilarityService similarityService() {
return null;