Allow reloading of search time analyzers (#43313)
Currently, changing resources (like dictionaries, synonym files, etc.) of search time analyzers is only possible by closing an index, changing the underlying resource (e.g. the synonym file), and then re-opening the index for the change to take effect. This PR adds a new API endpoint that allows triggering a reload of certain analysis resources (currently token filters) that will then pick up changes in the underlying file resources. To achieve this we introduce a new type of custom analyzer (ReloadableCustomAnalyzer) that uses a ReuseStrategy that allows swapping out analysis components. Custom analyzers that contain filters marked as "updateable" will automatically choose this implementation. This PR also adds this capability to `synonym` token filters for use in search time analyzers.

Relates to #29051
parent 51b230f6ab
commit 2cc7f5a744
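Before diving into the diff, here is a minimal standalone sketch of the mechanism the message above describes. This is not code from this commit; the class and member names below are invented for illustration, and the sketch only relies on Lucene's public `Analyzer`, `Analyzer.ReuseStrategy`, and `CloseableThreadLocal` APIs. The idea: the analyzer holds a volatile reference to its current components, and a custom reuse strategy discards a thread's cached `TokenStreamComponents` whenever the components that thread last saw differ from the current ones, so a reload takes effect without closing the index.

[source,java]
--------------------------------------------------
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.util.CloseableThreadLocal;

// Illustrative sketch of the swap-on-read pattern used by ReloadableCustomAnalyzer.
public final class SwappableAnalyzer extends Analyzer {

    /** Stand-in for the PR's AnalyzerComponents: whatever is needed to build the filter chain. */
    public interface Components {
        TokenStream wrap(TokenStream in);
    }

    private volatile Components current;  // swapped atomically on reload
    private final CloseableThreadLocal<Components> lastSeen = new CloseableThreadLocal<>();

    private static final ReuseStrategy SWAP_ON_CHANGE = new ReuseStrategy() {
        @Override
        public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) {
            SwappableAnalyzer a = (SwappableAnalyzer) analyzer;
            if (a.lastSeen.get() != a.current) {
                // components were swapped (or this is the first use on this thread):
                // remember the new ones and force a fresh createComponents() call
                a.lastSeen.set(a.current);
                return null;
            }
            return (TokenStreamComponents) getStoredValue(analyzer);
        }

        @Override
        public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components) {
            setStoredValue(analyzer, components);
        }
    };

    public SwappableAnalyzer(Components initial) {
        super(SWAP_ON_CHANGE);
        this.current = initial;
    }

    /** In-flight token streams keep their old chain; new streams pick up the new one. */
    public synchronized void reload(Components newComponents) {
        this.current = newComponents;
    }

    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        // only called via tokenStream(), after the reuse strategy stored the current components
        Tokenizer tokenizer = new WhitespaceTokenizer();
        return new TokenStreamComponents(tokenizer, lastSeen.get().wrap(tokenizer));
    }

    @Override
    public void close() {
        super.close();
        lastSeen.close();
    }
}
--------------------------------------------------

The actual `ReloadableCustomAnalyzer` in this commit follows the same shape, with `AnalyzerComponents` bundling the tokenizer, char filter, and token filter factories.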
@@ -730,8 +730,8 @@ public class RestHighLevelClientTests extends ESTestCase {
             "indices.exists_type",
             "indices.get_upgrade",
             "indices.put_alias",
-            "scripts_painless_execute",
-            "render_search_template"
+            "render_search_template",
+            "scripts_painless_execute"
         };
         //These API are not required for high-level client feature completeness
         String[] notRequiredApi = new String[] {
@@ -43,6 +43,8 @@ Additional settings are:
* `expand` (defaults to `true`).
* `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important
to note that only those synonym rules which cannot get parsed are ignored. For instance consider the following request:

[source,js]
--------------------------------------------------
@@ -0,0 +1,72 @@
[role="xpack"]
[testenv="basic"]
[[indices-reload-analyzers]]
== Reload Search Analyzers

experimental[]

Reloads search analyzers and their resources.

Synonym filters (both `synonym` and `synonym_graph`) can be declared as
updateable if they are only used in <<search-analyzer,search analyzers>>
with the `updateable` flag:

[source,js]
--------------------------------------------------
PUT /my_index
{
    "settings": {
        "index" : {
            "analysis" : {
                "analyzer" : {
                    "my_synonyms" : {
                        "tokenizer" : "whitespace",
                        "filter" : ["synonym"]
                    }
                },
                "filter" : {
                    "synonym" : {
                        "type" : "synonym",
                        "synonyms_path" : "analysis/synonym.txt",
                        "updateable" : true <1>
                    }
                }
            }
        }
    },
    "mappings": {
        "properties": {
            "text": {
                "type": "text",
                "analyzer" : "standard",
                "search_analyzer": "my_synonyms" <2>
            }
        }
    }
}
--------------------------------------------------
// CONSOLE

<1> Mark the synonym filter as updateable.
<2> The synonym analyzer is usable as a `search_analyzer`.

NOTE: Trying to use the above analyzer as an index analyzer will result in an error.

Using the <<indices-reload-analyzers,analyzer reload API>>, you can trigger reloading of the
synonym definition. The contents of the configured synonyms file will be reloaded and the
synonym definition the filter uses will be updated.

The `_reload_search_analyzers` API can be run on one or more indices and will trigger
reloading of the synonyms from the configured file.

NOTE: Reloading will happen on every node that holds shards of the index, so it is important
to update the synonym file contents on every data node (even those that don't currently
hold shard copies; shards might be relocated there in the future) before calling
reload, to ensure the new state of the file is reflected everywhere in the cluster.

[source,js]
--------------------------------------------------
POST /my_index/_reload_search_analyzers
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_index\n/]
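For orientation, a successful call could answer with a per-index summary. The sample response below is illustrative only: it is not part of this diff, and the field names (`reload_details`, `reloaded_analyzers`, `reloaded_node_ids`) and values are assumptions, not confirmed output of the new endpoint.

[source,js]
--------------------------------------------------
{
  "_shards" : {
    "total" : 2,
    "successful" : 2,
    "failed" : 0
  },
  "reload_details" : [
    {
      "index" : "my_index",
      "reloaded_analyzers" : [ "my_synonyms" ],
      "reloaded_node_ids" : [ "mfdqTXn_T7SGr2Ho2KT8uw" ]
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE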
@@ -15,6 +15,7 @@ not be included yet.
 * <<data-frame-apis,{dataframe-cap} APIs>>
 * <<graph-explore-api,Graph Explore API>>
 * <<freeze-index-api>>, <<unfreeze-index-api>>
+* <<indices-reload-analyzers,Reload Search Analyzers API>>
 * <<index-lifecycle-management-api,Index lifecycle management APIs>>
 * <<licensing-apis,Licensing APIs>>
 * <<ml-apis,Machine Learning APIs>>
@@ -38,4 +39,5 @@ include::{es-repo-dir}/rollup/rollup-api.asciidoc[]
 include::{xes-repo-dir}/rest-api/security.asciidoc[]
 include::{es-repo-dir}/indices/apis/unfreeze.asciidoc[]
 include::{xes-repo-dir}/rest-api/watcher.asciidoc[]
+include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[]
 include::defs.asciidoc[]
@@ -30,6 +30,7 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
 import org.elasticsearch.index.analysis.Analysis;
+import org.elasticsearch.index.analysis.AnalysisMode;
 import org.elasticsearch.index.analysis.CharFilterFactory;
 import org.elasticsearch.index.analysis.CustomAnalyzer;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
@@ -50,6 +51,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
     private final boolean lenient;
     protected final Settings settings;
     protected final Environment environment;
+    private final boolean updateable;
 
     SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env,
                               String name, Settings settings) {
@@ -65,9 +67,15 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
         this.expand = settings.getAsBoolean("expand", true);
         this.lenient = settings.getAsBoolean("lenient", false);
         this.format = settings.get("format", "");
+        this.updateable = settings.getAsBoolean("updateable", false);
         this.environment = env;
     }
 
+    @Override
+    public AnalysisMode getAnalysisMode() {
+        return this.updateable ? AnalysisMode.SEARCH_TIME : AnalysisMode.ALL;
+    }
+
     @Override
     public TokenStream create(TokenStream tokenStream) {
         throw new IllegalStateException("Call createPerAnalyzerSynonymFactory to specialize this factory for an analysis chain first");
@@ -98,6 +106,11 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
                 // which doesn't support stacked input tokens
                 return IDENTITY_FILTER;
             }
+
+            @Override
+            public AnalysisMode getAnalysisMode() {
+                return updateable ? AnalysisMode.SEARCH_TIME : AnalysisMode.ALL;
+            }
         };
     }
 
@@ -42,8 +42,9 @@ import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.analysis.AnalyzerComponents;
+import org.elasticsearch.index.analysis.AnalyzerComponentsProvider;
 import org.elasticsearch.index.analysis.CharFilterFactory;
-import org.elasticsearch.index.analysis.CustomAnalyzer;
 import org.elasticsearch.index.analysis.NameOrDefinition;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
@@ -261,18 +262,23 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeAc
             }
         }
 
-        CustomAnalyzer customAnalyzer = null;
-        if (analyzer instanceof CustomAnalyzer) {
-            customAnalyzer = (CustomAnalyzer) analyzer;
-        } else if (analyzer instanceof NamedAnalyzer && ((NamedAnalyzer) analyzer).analyzer() instanceof CustomAnalyzer) {
-            customAnalyzer = (CustomAnalyzer) ((NamedAnalyzer) analyzer).analyzer();
+        // maybe unwrap analyzer from NamedAnalyzer
+        Analyzer potentialCustomAnalyzer = analyzer;
+        if (analyzer instanceof NamedAnalyzer) {
+            potentialCustomAnalyzer = ((NamedAnalyzer) analyzer).analyzer();
         }
 
-        if (customAnalyzer != null) {
-            // customAnalyzer = divide charfilter, tokenizer tokenfilters
-            CharFilterFactory[] charFilterFactories = customAnalyzer.charFilters();
-            TokenizerFactory tokenizerFactory = customAnalyzer.tokenizerFactory();
-            TokenFilterFactory[] tokenFilterFactories = customAnalyzer.tokenFilters();
+        if (potentialCustomAnalyzer instanceof AnalyzerComponentsProvider) {
+            AnalyzerComponentsProvider customAnalyzer = (AnalyzerComponentsProvider) potentialCustomAnalyzer;
+            // note: this is not field-name dependent in our cases so we can leave out the argument
+            int positionIncrementGap = potentialCustomAnalyzer.getPositionIncrementGap("");
+            int offsetGap = potentialCustomAnalyzer.getOffsetGap("");
+            AnalyzerComponents components = customAnalyzer.getComponents();
+            // divide charfilter, tokenizer tokenfilters
+            CharFilterFactory[] charFilterFactories = components.getCharFilters();
+            TokenizerFactory tokenizerFactory = components.getTokenizerFactory();
+            TokenFilterFactory[] tokenFilterFactories = components.getTokenFilters();
+            String tokenizerName = components.getTokenizerName();
 
             String[][] charFiltersTexts = new String[charFilterFactories != null ? charFilterFactories.length : 0][request.text().length];
             TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ?
@@ -298,7 +304,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeAc
                 // analyzing only tokenizer
                 Tokenizer tokenizer = tokenizerFactory.create();
                 tokenizer.setReader(reader);
-                tokenizerTokenListCreator.analyze(tokenizer, customAnalyzer, includeAttributes);
+                tokenizerTokenListCreator.analyze(tokenizer, includeAttributes, positionIncrementGap, offsetGap);
 
                 // analyzing each tokenfilter
                 if (tokenFilterFactories != null) {
@@ -308,7 +314,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeAc
                     }
                     TokenStream stream = createStackedTokenStream(request.text()[textIndex],
                         charFilterFactories, tokenizerFactory, tokenFilterFactories, tokenFilterIndex + 1);
-                    tokenFiltersTokenListCreator[tokenFilterIndex].analyze(stream, customAnalyzer, includeAttributes);
+                    tokenFiltersTokenListCreator[tokenFilterIndex].analyze(stream, includeAttributes, positionIncrementGap, offsetGap);
                 }
             }
         }
@@ -331,8 +337,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeAc
                     tokenFilterFactories[tokenFilterIndex].name(), tokenFiltersTokenListCreator[tokenFilterIndex].getArrayTokens());
                 }
             }
-            detailResponse = new AnalyzeAction.DetailAnalyzeResponse(charFilteredLists, new AnalyzeAction.AnalyzeTokenList(
-                customAnalyzer.getTokenizerName(), tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists);
+            detailResponse = new AnalyzeAction.DetailAnalyzeResponse(charFilteredLists,
+                new AnalyzeAction.AnalyzeTokenList(tokenizerName, tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists);
         } else {
             String name;
             if (analyzer instanceof NamedAnalyzer) {
@@ -343,8 +349,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeAc
 
             TokenListCreator tokenListCreator = new TokenListCreator(maxTokenCount);
             for (String text : request.text()) {
-                tokenListCreator.analyze(analyzer.tokenStream("", text), analyzer,
-                    includeAttributes);
+                tokenListCreator.analyze(analyzer.tokenStream("", text), includeAttributes, analyzer.getPositionIncrementGap(""),
+                    analyzer.getOffsetGap(""));
             }
             detailResponse
                 = new AnalyzeAction.DetailAnalyzeResponse(new AnalyzeAction.AnalyzeTokenList(name, tokenListCreator.getArrayTokens()));
@@ -414,7 +420,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeAc
             tc = new TokenCounter(maxTokenCount);
         }
 
-        private void analyze(TokenStream stream, Analyzer analyzer, Set<String> includeAttributes) {
+        private void analyze(TokenStream stream, Set<String> includeAttributes, int positionIncrementGap, int offsetGap) {
             try {
                 stream.reset();
                 CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
@@ -437,8 +443,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeAc
                 lastOffset += offset.endOffset();
                 lastPosition += posIncr.getPositionIncrement();
 
-                lastPosition += analyzer.getPositionIncrementGap("");
-                lastOffset += analyzer.getOffsetGap("");
+                lastPosition += positionIncrementGap;
+                lastOffset += offsetGap;
 
             } catch (IOException e) {
                 throw new ElasticsearchException("failed to analyze", e);
@@ -818,4 +818,5 @@ public interface IndicesAdminClient extends ElasticsearchClient {
      * Swaps the index pointed to by an alias given all provided conditions are satisfied
      */
     void rolloverIndex(RolloverRequest request, ActionListener<RolloverResponse> listener);
+
 }
@@ -534,5 +534,4 @@ public class Requests {
     public static SnapshotsStatusRequest snapshotsStatusRequest(String repository) {
         return new SnapshotsStatusRequest(repository);
     }
-
 }
@@ -527,7 +527,6 @@ public final class AnalysisRegistry implements Closeable {
                                                Map<String, TokenizerFactory> tokenizerFactoryFactories,
                                                Map<String, CharFilterFactory> charFilterFactoryFactories,
                                                Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
-
         Map<String, NamedAnalyzer> analyzers = new HashMap<>();
         Map<String, NamedAnalyzer> normalizers = new HashMap<>();
         Map<String, NamedAnalyzer> whitespaceNormalizers = new HashMap<>();
@@ -569,9 +568,11 @@ public final class AnalysisRegistry implements Closeable {
         return new IndexAnalyzers(analyzers, normalizers, whitespaceNormalizers);
     }
 
-    private static NamedAnalyzer produceAnalyzer(String name, AnalyzerProvider<?> analyzerFactory,
-            Map<String, TokenFilterFactory> tokenFilters, Map<String, CharFilterFactory> charFilters,
-            Map<String, TokenizerFactory> tokenizers) {
+    private static NamedAnalyzer produceAnalyzer(String name,
+                                                 AnalyzerProvider<?> analyzerFactory,
+                                                 Map<String, TokenFilterFactory> tokenFilters,
+                                                 Map<String, CharFilterFactory> charFilters,
+                                                 Map<String, TokenizerFactory> tokenizers) {
         /*
          * Lucene defaults positionIncrementGap to 0 in all analyzers but
          * Elasticsearch defaults them to 0 only before version 2.0
@@ -0,0 +1,111 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

import org.elasticsearch.common.settings.Settings;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * A class that groups analysis components necessary to produce a custom analyzer.
 * See {@link ReloadableCustomAnalyzer} for an example usage.
 */
public final class AnalyzerComponents {
    private final String tokenizerName;
    private final TokenizerFactory tokenizerFactory;
    private final CharFilterFactory[] charFilters;
    private final TokenFilterFactory[] tokenFilters;
    private final AnalysisMode analysisMode;

    AnalyzerComponents(String tokenizerName, TokenizerFactory tokenizerFactory, CharFilterFactory[] charFilters,
            TokenFilterFactory[] tokenFilters) {
        this.tokenizerName = tokenizerName;
        this.tokenizerFactory = tokenizerFactory;
        this.charFilters = charFilters;
        this.tokenFilters = tokenFilters;
        AnalysisMode mode = AnalysisMode.ALL;
        for (TokenFilterFactory f : tokenFilters) {
            mode = mode.merge(f.getAnalysisMode());
        }
        this.analysisMode = mode;
    }

    static AnalyzerComponents createComponents(String name, Settings analyzerSettings, final Map<String, TokenizerFactory> tokenizers,
            final Map<String, CharFilterFactory> charFilters, final Map<String, TokenFilterFactory> tokenFilters) {
        String tokenizerName = analyzerSettings.get("tokenizer");
        if (tokenizerName == null) {
            throw new IllegalArgumentException("Custom Analyzer [" + name + "] must be configured with a tokenizer");
        }

        TokenizerFactory tokenizer = tokenizers.get(tokenizerName);
        if (tokenizer == null) {
            throw new IllegalArgumentException(
                    "Custom Analyzer [" + name + "] failed to find tokenizer under name " + "[" + tokenizerName + "]");
        }

        List<String> charFilterNames = analyzerSettings.getAsList("char_filter");
        List<CharFilterFactory> charFiltersList = new ArrayList<>(charFilterNames.size());
        for (String charFilterName : charFilterNames) {
            CharFilterFactory charFilter = charFilters.get(charFilterName);
            if (charFilter == null) {
                throw new IllegalArgumentException(
                        "Custom Analyzer [" + name + "] failed to find char_filter under name " + "[" + charFilterName + "]");
            }
            charFiltersList.add(charFilter);
        }

        List<String> tokenFilterNames = analyzerSettings.getAsList("filter");
        List<TokenFilterFactory> tokenFilterList = new ArrayList<>(tokenFilterNames.size());
        for (String tokenFilterName : tokenFilterNames) {
            TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName);
            if (tokenFilter == null) {
                throw new IllegalArgumentException(
                        "Custom Analyzer [" + name + "] failed to find filter under name " + "[" + tokenFilterName + "]");
            }
            tokenFilter = tokenFilter.getChainAwareTokenFilterFactory(tokenizer, charFiltersList, tokenFilterList, tokenFilters::get);
            tokenFilterList.add(tokenFilter);
        }

        return new AnalyzerComponents(tokenizerName, tokenizer, charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]),
                tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()]));
    }

    public String getTokenizerName() {
        return tokenizerName;
    }

    public TokenizerFactory getTokenizerFactory() {
        return tokenizerFactory;
    }

    public TokenFilterFactory[] getTokenFilters() {
        return tokenFilters;
    }

    public CharFilterFactory[] getCharFilters() {
        return charFilters;
    }

    public AnalysisMode analysisMode() {
        return this.analysisMode;
    }
}
@@ -0,0 +1,29 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

/**
 * Analyzers that provide access to their token filters should implement this
 */
public interface AnalyzerComponentsProvider {

    AnalyzerComponents getComponents();

}
@@ -25,15 +25,9 @@ import org.apache.lucene.analysis.Tokenizer;
 
 import java.io.Reader;
 
-public final class CustomAnalyzer extends Analyzer {
+public final class CustomAnalyzer extends Analyzer implements AnalyzerComponentsProvider {
 
-    private final String tokenizerName;
-    private final TokenizerFactory tokenizerFactory;
-
-    private final CharFilterFactory[] charFilters;
-
-    private final TokenFilterFactory[] tokenFilters;
+    private final AnalyzerComponents components;
     private final int positionIncrementGap;
     private final int offsetGap;
     private final AnalysisMode analysisMode;
@@ -45,10 +39,7 @@ public final class CustomAnalyzer extends Analyzer {
 
     public CustomAnalyzer(String tokenizerName, TokenizerFactory tokenizerFactory, CharFilterFactory[] charFilters,
                           TokenFilterFactory[] tokenFilters, int positionIncrementGap, int offsetGap) {
-        this.tokenizerName = tokenizerName;
-        this.tokenizerFactory = tokenizerFactory;
-        this.charFilters = charFilters;
-        this.tokenFilters = tokenFilters;
+        this.components = new AnalyzerComponents(tokenizerName, tokenizerFactory, charFilters, tokenFilters);
         this.positionIncrementGap = positionIncrementGap;
         this.offsetGap = offsetGap;
         // merge and transfer token filter analysis modes with analyzer
@@ -63,19 +54,19 @@ public final class CustomAnalyzer extends Analyzer {
      * The name of the tokenizer as configured by the user.
      */
     public String getTokenizerName() {
-        return tokenizerName;
+        return this.components.getTokenizerName();
     }
 
     public TokenizerFactory tokenizerFactory() {
-        return tokenizerFactory;
+        return this.components.getTokenizerFactory();
     }
 
     public TokenFilterFactory[] tokenFilters() {
-        return tokenFilters;
+        return this.components.getTokenFilters();
     }
 
     public CharFilterFactory[] charFilters() {
-        return charFilters;
+        return this.components.getCharFilters();
     }
 
     @Override
@@ -95,11 +86,16 @@ public final class CustomAnalyzer extends Analyzer {
         return this.analysisMode;
     }
 
+    @Override
+    public AnalyzerComponents getComponents() {
+        return this.components;
+    }
+
     @Override
     protected TokenStreamComponents createComponents(String fieldName) {
-        Tokenizer tokenizer = tokenizerFactory.create();
+        Tokenizer tokenizer = this.tokenizerFactory().create();
         TokenStream tokenStream = tokenizer;
-        for (TokenFilterFactory tokenFilter : tokenFilters) {
+        for (TokenFilterFactory tokenFilter : tokenFilters()) {
             tokenStream = tokenFilter.create(tokenStream);
         }
         return new TokenStreamComponents(tokenizer, tokenStream);
@@ -107,6 +103,7 @@ public final class CustomAnalyzer extends Analyzer {
 
     @Override
     protected Reader initReader(String fieldName, Reader reader) {
+        CharFilterFactory[] charFilters = charFilters();
         if (charFilters != null && charFilters.length > 0) {
             for (CharFilterFactory charFilter : charFilters) {
                 reader = charFilter.create(reader);
@@ -117,18 +114,18 @@ public final class CustomAnalyzer extends Analyzer {
 
     @Override
     protected Reader initReaderForNormalization(String fieldName, Reader reader) {
-        for (CharFilterFactory charFilter : charFilters) {
-            reader = charFilter.normalize(reader);
-        }
-        return reader;
+        for (CharFilterFactory charFilter : charFilters()) {
+            reader = charFilter.normalize(reader);
+        }
+        return reader;
     }
 
     @Override
     protected TokenStream normalize(String fieldName, TokenStream in) {
-        TokenStream result = in;
-        for (TokenFilterFactory filter : tokenFilters) {
-            result = filter.normalize(result);
-        }
-        return result;
+        TokenStream result = in;
+        for (TokenFilterFactory filter : tokenFilters()) {
+            result = filter.normalize(result);
+        }
+        return result;
     }
 }
@@ -19,23 +19,24 @@
 
 package org.elasticsearch.index.analysis;
 
+import org.apache.lucene.analysis.Analyzer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.TextFieldMapper;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import static org.elasticsearch.index.analysis.AnalyzerComponents.createComponents;
+
 /**
  * A custom analyzer that is built out of a single {@link org.apache.lucene.analysis.Tokenizer} and a list
  * of {@link org.apache.lucene.analysis.TokenFilter}s.
  */
-public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<CustomAnalyzer> {
+public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analyzer> {
 
     private final Settings analyzerSettings;
 
-    private CustomAnalyzer customAnalyzer;
+    private Analyzer customAnalyzer;
 
     public CustomAnalyzerProvider(IndexSettings indexSettings,
                                   String name, Settings settings) {
@@ -43,58 +44,33 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
         this.analyzerSettings = settings;
     }
 
-    void build(final Map<String, TokenizerFactory> tokenizers, final Map<String, CharFilterFactory> charFilters,
-               final Map<String, TokenFilterFactory> tokenFilters) {
-        String tokenizerName = analyzerSettings.get("tokenizer");
-        if (tokenizerName == null) {
-            throw new IllegalArgumentException("Custom Analyzer [" + name() + "] must be configured with a tokenizer");
-        }
-
-        TokenizerFactory tokenizer = tokenizers.get(tokenizerName);
-        if (tokenizer == null) {
-            throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find tokenizer under name " +
-                "[" + tokenizerName + "]");
-        }
-
-        List<String> charFilterNames = analyzerSettings.getAsList("char_filter");
-        List<CharFilterFactory> charFiltersList = new ArrayList<>(charFilterNames.size());
-        for (String charFilterName : charFilterNames) {
-            CharFilterFactory charFilter = charFilters.get(charFilterName);
-            if (charFilter == null) {
-                throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find char_filter under name " +
-                    "[" + charFilterName + "]");
-            }
-            charFiltersList.add(charFilter);
-        }
+    void build(final Map<String, TokenizerFactory> tokenizers,
+               final Map<String, CharFilterFactory> charFilters,
+               final Map<String, TokenFilterFactory> tokenFilters) {
+        customAnalyzer = create(name(), analyzerSettings, tokenizers, charFilters, tokenFilters);
+    }
 
+    /**
+     * Factory method that either returns a plain {@link CustomAnalyzer} if the components used for creation support index
+     * and search time use, or a {@link ReloadableCustomAnalyzer} if the components are intended for search time use only.
+     */
+    private static Analyzer create(String name, Settings analyzerSettings, Map<String, TokenizerFactory> tokenizers,
+                                   Map<String, CharFilterFactory> charFilters,
+                                   Map<String, TokenFilterFactory> tokenFilters) {
         int positionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
 
         positionIncrementGap = analyzerSettings.getAsInt("position_increment_gap", positionIncrementGap);
 
         int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
 
-        List<String> tokenFilterNames = analyzerSettings.getAsList("filter");
-        List<TokenFilterFactory> tokenFilterList = new ArrayList<>(tokenFilterNames.size());
-        for (String tokenFilterName : tokenFilterNames) {
-            TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName);
-            if (tokenFilter == null) {
-                throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name " +
-                    "[" + tokenFilterName + "]");
-            }
-            tokenFilter = tokenFilter.getChainAwareTokenFilterFactory(tokenizer, charFiltersList, tokenFilterList, tokenFilters::get);
-            tokenFilterList.add(tokenFilter);
+        AnalyzerComponents components = createComponents(name, analyzerSettings, tokenizers, charFilters, tokenFilters);
+        if (components.analysisMode().equals(AnalysisMode.SEARCH_TIME)) {
+            return new ReloadableCustomAnalyzer(components, positionIncrementGap, offsetGap);
+        } else {
+            return new CustomAnalyzer(components.getTokenizerName(), components.getTokenizerFactory(), components.getCharFilters(),
+                components.getTokenFilters(), positionIncrementGap, offsetGap);
         }
-
-        this.customAnalyzer = new CustomAnalyzer(tokenizerName, tokenizer,
-            charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]),
-            tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()]),
-            positionIncrementGap,
-            offsetGap
-        );
     }
 
     @Override
-    public CustomAnalyzer get() {
+    public Analyzer get() {
         return this.customAnalyzer;
     }
 }
@@ -62,6 +62,13 @@ public final class IndexAnalyzers implements Closeable {
         return analyzers.get(name);
     }
 
+    /**
+     * Returns an (unmodifiable) map containing the index analyzers
+     */
+    public Map<String, NamedAnalyzer> getAnalyzers() {
+        return analyzers;
+    }
+
     /**
      * Returns a normalizer mapped to the given name or <code>null</code> if not present
      */
@@ -112,8 +112,8 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper {
             return; // everything allowed if this analyzer is in ALL mode
         }
         if (this.getAnalysisMode() != mode) {
-            if (analyzer instanceof CustomAnalyzer) {
-                TokenFilterFactory[] tokenFilters = ((CustomAnalyzer) analyzer).tokenFilters();
+            if (analyzer instanceof AnalyzerComponentsProvider) {
+                TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters();
                 List<String> offendingFilters = new ArrayList<>();
                 for (TokenFilterFactory tokenFilter : tokenFilters) {
                     if (tokenFilter.getAnalysisMode() != mode) {
@@ -0,0 +1,162 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.common.settings.Settings;

import java.io.Reader;
import java.util.Map;

public final class ReloadableCustomAnalyzer extends Analyzer implements AnalyzerComponentsProvider {

    private volatile AnalyzerComponents components;

    private CloseableThreadLocal<AnalyzerComponents> storedComponents = new CloseableThreadLocal<>();

    private final int positionIncrementGap;

    private final int offsetGap;

    /**
     * An alternative {@link ReuseStrategy} that allows swapping the stored analyzer components when they change.
     * This is used to change e.g. token filters in search time analyzers.
     */
    private static final ReuseStrategy UPDATE_STRATEGY = new ReuseStrategy() {
        @Override
        public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) {
            ReloadableCustomAnalyzer custom = (ReloadableCustomAnalyzer) analyzer;
            AnalyzerComponents components = custom.getComponents();
            AnalyzerComponents storedComponents = custom.getStoredComponents();
            if (storedComponents == null || components != storedComponents) {
                custom.setStoredComponents(components);
                return null;
            }
            TokenStreamComponents tokenStream = (TokenStreamComponents) getStoredValue(analyzer);
            assert tokenStream != null;
            return tokenStream;
        }

        @Override
        public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents tokenStream) {
            setStoredValue(analyzer, tokenStream);
        }
    };

    ReloadableCustomAnalyzer(AnalyzerComponents components, int positionIncrementGap, int offsetGap) {
        super(UPDATE_STRATEGY);
        if (components.analysisMode().equals(AnalysisMode.SEARCH_TIME) == false) {
            throw new IllegalArgumentException(
                    "ReloadableCustomAnalyzer must only be initialized with analysis components in AnalysisMode.SEARCH_TIME mode");
        }
        this.components = components;
        this.positionIncrementGap = positionIncrementGap;
        this.offsetGap = offsetGap;
    }

    @Override
    public AnalyzerComponents getComponents() {
        return this.components;
    }

    @Override
    public int getPositionIncrementGap(String fieldName) {
        return this.positionIncrementGap;
    }

    @Override
    public int getOffsetGap(String field) {
        if (this.offsetGap < 0) {
            return super.getOffsetGap(field);
        }
        return this.offsetGap;
    }

    public AnalysisMode getAnalysisMode() {
        return this.components.analysisMode();
    }

    @Override
    protected Reader initReaderForNormalization(String fieldName, Reader reader) {
        final AnalyzerComponents components = getComponents();
        for (CharFilterFactory charFilter : components.getCharFilters()) {
            reader = charFilter.normalize(reader);
        }
        return reader;
    }

    @Override
    protected TokenStream normalize(String fieldName, TokenStream in) {
        final AnalyzerComponents components = getComponents();
        TokenStream result = in;
        for (TokenFilterFactory filter : components.getTokenFilters()) {
            result = filter.normalize(result);
        }
        return result;
    }

    public synchronized void reload(String name,
                                    Settings settings,
                                    final Map<String, TokenizerFactory> tokenizers,
                                    final Map<String, CharFilterFactory> charFilters,
                                    final Map<String, TokenFilterFactory> tokenFilters) {
        AnalyzerComponents components = AnalyzerComponents.createComponents(name, settings, tokenizers, charFilters, tokenFilters);
        this.components = components;
    }

    @Override
    public void close() {
        super.close();
        storedComponents.close();
    }

    private void setStoredComponents(AnalyzerComponents components) {
        storedComponents.set(components);
    }

    private AnalyzerComponents getStoredComponents() {
        return storedComponents.get();
    }

    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        final AnalyzerComponents components = getStoredComponents();
        Tokenizer tokenizer = components.getTokenizerFactory().create();
        TokenStream tokenStream = tokenizer;
        for (TokenFilterFactory tokenFilter : components.getTokenFilters()) {
            tokenStream = tokenFilter.create(tokenStream);
        }
        return new TokenStreamComponents(tokenizer, tokenStream);
    }

    @Override
    protected Reader initReader(String fieldName, Reader reader) {
        final AnalyzerComponents components = getStoredComponents();
        if (components.getCharFilters() != null && components.getCharFilters().length > 0) {
            for (CharFilterFactory charFilter : components.getCharFilters()) {
                reader = charFilter.create(reader);
            }
        }
        return reader;
    }
}
@@ -82,7 +82,6 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
         return new FieldTypeLookup(fullName, aliases);
     }
 
-
     /** Returns the field for the given field */
     public MappedFieldType get(String field) {
         String concreteField = aliasToConcreteName.getOrDefault(field, field);
@@ -37,6 +37,7 @@ import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -46,8 +47,13 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexSortConfig;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.analysis.CharFilterFactory;
 import org.elasticsearch.index.analysis.IndexAnalyzers;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.ReloadableCustomAnalyzer;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.index.analysis.TokenizerFactory;
 import org.elasticsearch.index.mapper.Mapper.BuilderContext;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.similarity.SimilarityService;
@@ -843,4 +849,20 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             return defaultAnalyzer;
         }
     }
+
+    public synchronized void reloadSearchAnalyzers(AnalysisRegistry registry) throws IOException {
+        logger.info("reloading search analyzers");
+        // refresh indexAnalyzers and search analyzers
+        final Map<String, TokenizerFactory> tokenizerFactories = registry.buildTokenizerFactories(indexSettings);
+        final Map<String, CharFilterFactory> charFilterFactories = registry.buildCharFilterFactories(indexSettings);
+        final Map<String, TokenFilterFactory> tokenFilterFactories = registry.buildTokenFilterFactories(indexSettings);
+        final Map<String, Settings> settings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
+        for (NamedAnalyzer namedAnalyzer : indexAnalyzers.getAnalyzers().values()) {
+            if (namedAnalyzer.analyzer() instanceof ReloadableCustomAnalyzer) {
+                ReloadableCustomAnalyzer analyzer = (ReloadableCustomAnalyzer) namedAnalyzer.analyzer();
+                Settings analyzerSettings = settings.get(namedAnalyzer.name());
+                analyzer.reload(namedAnalyzer.name(), analyzerSettings, tokenizerFactories, charFilterFactories, tokenFilterFactories);
+            }
+        }
+    }
 }
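The transport wiring that serves the new endpoint is not part of this excerpt. Conceptually, a per-node handler only needs to call the new `MapperService#reloadSearchAnalyzers` for each index the request resolves to. A hedged sketch follows; the handler shape and the `indicesService` lookup are assumptions for illustration, not code from this commit:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.indices.IndicesService;

// Illustrative only: drives the new MapperService#reloadSearchAnalyzers on one node.
final class ReloadSketch {

    private ReloadSketch() {}

    static void reloadOnLocalNode(IndicesService indicesService, AnalysisRegistry registry,
                                  Index... concreteIndices) throws IOException {
        for (Index index : concreteIndices) {
            IndexService indexService = indicesService.indexServiceSafe(index);
            // swaps the AnalyzerComponents inside every ReloadableCustomAnalyzer of this index
            indexService.mapperService().reloadSearchAnalyzers(registry);
        }
    }
}
--------------------------------------------------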
@@ -26,7 +26,7 @@ import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
 import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo;
 import org.apache.lucene.search.vectorhighlight.FragmentsBuilder;
 import org.apache.lucene.util.CollectionUtil;
-import org.elasticsearch.index.analysis.CustomAnalyzer;
+import org.elasticsearch.index.analysis.AnalyzerComponentsProvider;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -81,9 +81,8 @@ public final class FragmentBuilderHelper {
         if (analyzer instanceof NamedAnalyzer) {
             analyzer = ((NamedAnalyzer) analyzer).analyzer();
         }
-        if (analyzer instanceof CustomAnalyzer) {
-            final CustomAnalyzer a = (CustomAnalyzer) analyzer;
-            TokenFilterFactory[] tokenFilters = a.tokenFilters();
+        if (analyzer instanceof AnalyzerComponentsProvider) {
+            final TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters();
             for (TokenFilterFactory tokenFilterFactory : tokenFilters) {
                 if (tokenFilterFactory.breaksFastVectorHighlighter()) {
                     return true;
@@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
-import org.elasticsearch.index.analysis.CustomAnalyzer;
+import org.elasticsearch.index.analysis.AnalyzerComponentsProvider;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.analysis.ShingleTokenFilterFactory;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
@@ -675,9 +675,8 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSuggestionB
         if (analyzer instanceof NamedAnalyzer) {
             analyzer = ((NamedAnalyzer)analyzer).analyzer();
         }
-        if (analyzer instanceof CustomAnalyzer) {
-            final CustomAnalyzer a = (CustomAnalyzer) analyzer;
-            final TokenFilterFactory[] tokenFilters = a.tokenFilters();
+        if (analyzer instanceof AnalyzerComponentsProvider) {
+            final TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters();
             for (TokenFilterFactory tokenFilterFactory : tokenFilters) {
                 if (tokenFilterFactory instanceof ShingleTokenFilterFactory) {
                     return ((ShingleTokenFilterFactory)tokenFilterFactory).getInnerFactory();
@@ -20,8 +20,8 @@
 package org.elasticsearch.action.admin.indices.cache.clear;
 
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractBroadcastResponseTestCase;
 
 import java.util.List;
 
@@ -20,8 +20,8 @@
 package org.elasticsearch.action.admin.indices.flush;
 
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractBroadcastResponseTestCase;
 
 import java.util.List;
 
@@ -20,8 +20,8 @@
 package org.elasticsearch.action.admin.indices.forcemerge;
 
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractBroadcastResponseTestCase;
 
 import java.util.List;
 
@@ -20,8 +20,8 @@
 package org.elasticsearch.action.admin.indices.refresh;
 
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractBroadcastResponseTestCase;
 
 import java.util.List;
 
@@ -21,9 +21,9 @@ package org.elasticsearch.action.admin.indices.validate.query;
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractBroadcastResponseTestCase;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -0,0 +1,168 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.junit.BeforeClass;

import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.index.analysis.AnalyzerComponents.createComponents;

public class ReloadableCustomAnalyzerTests extends ESTestCase {

    private static TestAnalysis testAnalysis;
    private static Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();

    private static TokenFilterFactory NO_OP_SEARCH_TIME_FILTER = new AbstractTokenFilterFactory(
            IndexSettingsModule.newIndexSettings("index", settings), "my_filter", Settings.EMPTY) {
        @Override
        public AnalysisMode getAnalysisMode() {
            return AnalysisMode.SEARCH_TIME;
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            return tokenStream;
        }
    };

    private static TokenFilterFactory LOWERCASE_SEARCH_TIME_FILTER = new AbstractTokenFilterFactory(
            IndexSettingsModule.newIndexSettings("index", settings), "my_other_filter", Settings.EMPTY) {
        @Override
        public AnalysisMode getAnalysisMode() {
            return AnalysisMode.SEARCH_TIME;
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            return new LowerCaseFilter(tokenStream);
        }
    };

    @BeforeClass
    public static void setup() throws IOException {
        testAnalysis = createTestAnalysis(new Index("test", "_na_"), settings);
    }

    /**
     * test constructor and getters
     */
    public void testBasicCtor() {
        int positionIncrementGap = randomInt();
        int offsetGap = randomInt();

        Settings analyzerSettings = Settings.builder()
                .put("tokenizer", "standard")
                .putList("filter", "my_filter")
                .build();

        AnalyzerComponents components = createComponents("my_analyzer", analyzerSettings, testAnalysis.tokenizer, testAnalysis.charFilter,
                Collections.singletonMap("my_filter", NO_OP_SEARCH_TIME_FILTER));

        try (ReloadableCustomAnalyzer analyzer = new ReloadableCustomAnalyzer(components, positionIncrementGap, offsetGap)) {
            assertEquals(positionIncrementGap, analyzer.getPositionIncrementGap(randomAlphaOfLength(5)));
            assertEquals(offsetGap >= 0 ? offsetGap : 1, analyzer.getOffsetGap(randomAlphaOfLength(5)));
            assertEquals("standard", analyzer.getComponents().getTokenizerName());
            assertEquals(0, analyzer.getComponents().getCharFilters().length);
            assertSame(testAnalysis.tokenizer.get("standard"), analyzer.getComponents().getTokenizerFactory());
            assertEquals(1, analyzer.getComponents().getTokenFilters().length);
            assertSame(NO_OP_SEARCH_TIME_FILTER, analyzer.getComponents().getTokenFilters()[0]);
        }

        // check that when using regular non-search time filters only, we get an exception
        final Settings indexAnalyzerSettings = Settings.builder()
                .put("tokenizer", "standard")
                .putList("filter", "lowercase")
                .build();
        AnalyzerComponents indexAnalyzerComponents = createComponents("my_analyzer", indexAnalyzerSettings, testAnalysis.tokenizer,
                testAnalysis.charFilter, testAnalysis.tokenFilter);
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
                () -> new ReloadableCustomAnalyzer(indexAnalyzerComponents, positionIncrementGap, offsetGap));
        assertEquals("ReloadableCustomAnalyzer must only be initialized with analysis components in AnalysisMode.SEARCH_TIME mode",
                ex.getMessage());
    }

    /**
     * start multiple threads that create token streams from this analyzer until reloaded tokenfilter takes effect
     */
    public void testReloading() throws IOException, InterruptedException {
        Settings analyzerSettings = Settings.builder()
                .put("tokenizer", "standard")
                .putList("filter", "my_filter")
                .build();

        AnalyzerComponents components = createComponents("my_analyzer", analyzerSettings, testAnalysis.tokenizer, testAnalysis.charFilter,
                Collections.singletonMap("my_filter", NO_OP_SEARCH_TIME_FILTER));
        int numThreads = randomIntBetween(5, 10);

        ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
        CountDownLatch firstCheckpoint = new CountDownLatch(numThreads);
        CountDownLatch secondCheckpoint = new CountDownLatch(numThreads);

        try (ReloadableCustomAnalyzer analyzer = new ReloadableCustomAnalyzer(components, 0, 0)) {
            executorService.submit(() -> {
                while (secondCheckpoint.getCount() > 0) {
                    try (TokenStream firstTokenStream = analyzer.tokenStream("myField", "TEXT")) {
                        firstTokenStream.reset();
                        CharTermAttribute term = firstTokenStream.addAttribute(CharTermAttribute.class);
                        assertTrue(firstTokenStream.incrementToken());
                        if (term.toString().equals("TEXT")) {
                            firstCheckpoint.countDown();
                        }
                        if (term.toString().equals("text")) {
                            secondCheckpoint.countDown();
                        }
                        assertFalse(firstTokenStream.incrementToken());
                        firstTokenStream.end();
                    } catch (Exception e) {
                        throw ExceptionsHelper.convertToRuntime(e);
                    }
                }
            });

            // wait until all running threads have seen the unaltered upper case analysis at least once
            assertTrue(firstCheckpoint.await(5, TimeUnit.SECONDS));

            analyzer.reload("my_analyzer", analyzerSettings, testAnalysis.tokenizer, testAnalysis.charFilter,
                    Collections.singletonMap("my_filter", LOWERCASE_SEARCH_TIME_FILTER));

            // wait until all running threads have seen the new lower case analysis at least once
            assertTrue(secondCheckpoint.await(5, TimeUnit.SECONDS));

            executorService.shutdown();
            executorService.awaitTermination(1, TimeUnit.SECONDS);
        }
    }
}
@@ -19,7 +19,9 @@
 
 package org.elasticsearch.index.mapper;
 
+import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -27,18 +29,30 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AnalysisMode;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.ReloadableCustomAnalyzer;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
 import org.elasticsearch.indices.InvalidTypeNameException;
+import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
+import org.elasticsearch.plugins.AnalysisPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
 import static org.hamcrest.CoreMatchers.containsString;
@@ -49,7 +63,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
-        return Collections.singleton(InternalSettingsPlugin.class);
+        return Arrays.asList(InternalSettingsPlugin.class, ReloadableFilterPlugin.class);
     }
 
     public void testTypeNameStartsWithIllegalDot() {
@@ -434,4 +448,97 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
        assertEquals(testString, documentMapper.mappers().getMapper(testString).simpleName());
    }

    public void testReloadSearchAnalyzers() throws IOException {
        Settings settings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put("index.analysis.analyzer.reloadableAnalyzer.type", "custom")
                .put("index.analysis.analyzer.reloadableAnalyzer.tokenizer", "standard")
                .putList("index.analysis.analyzer.reloadableAnalyzer.filter", "myReloadableFilter").build();

        MapperService mapperService = createIndex("test_index", settings).mapperService();
        CompressedXContent mapping = new CompressedXContent(BytesReference.bytes(
                XContentFactory.jsonBuilder().startObject().startObject("_doc")
                        .startObject("properties")
                            .startObject("field")
                                .field("type", "text")
                                .field("analyzer", "simple")
                                .field("search_analyzer", "reloadableAnalyzer")
                                .field("search_quote_analyzer", "stop")
                            .endObject()
                            .startObject("otherField")
                                .field("type", "text")
                                .field("analyzer", "standard")
                                .field("search_analyzer", "simple")
                                .field("search_quote_analyzer", "reloadableAnalyzer")
                            .endObject()
                        .endObject()
                .endObject().endObject()));

        mapperService.merge("_doc", mapping, MergeReason.MAPPING_UPDATE);
        IndexAnalyzers current = mapperService.getIndexAnalyzers();

        ReloadableCustomAnalyzer originalReloadableAnalyzer = (ReloadableCustomAnalyzer) current.get("reloadableAnalyzer").analyzer();
        TokenFilterFactory[] originalTokenFilters = originalReloadableAnalyzer.getComponents().getTokenFilters();
        assertEquals(1, originalTokenFilters.length);
        assertEquals("myReloadableFilter", originalTokenFilters[0].name());

        // now reload; this should swap the TokenFilterFactory inside the analyzer
        mapperService.reloadSearchAnalyzers(getInstanceFromNode(AnalysisRegistry.class));
        IndexAnalyzers updatedAnalyzers = mapperService.getIndexAnalyzers();
        assertSame(current, updatedAnalyzers);
        assertSame(current.getDefaultIndexAnalyzer(), updatedAnalyzers.getDefaultIndexAnalyzer());
        assertSame(current.getDefaultSearchAnalyzer(), updatedAnalyzers.getDefaultSearchAnalyzer());
        assertSame(current.getDefaultSearchQuoteAnalyzer(), updatedAnalyzers.getDefaultSearchQuoteAnalyzer());

        assertFalse(assertSameContainedFilters(originalTokenFilters, current.get("reloadableAnalyzer")));
        assertFalse(assertSameContainedFilters(originalTokenFilters, mapperService.fullName("field").searchAnalyzer()));
        assertFalse(assertSameContainedFilters(originalTokenFilters, mapperService.fullName("otherField").searchQuoteAnalyzer()));
    }

    private boolean assertSameContainedFilters(TokenFilterFactory[] originalTokenFilter, NamedAnalyzer updatedAnalyzer) {
        ReloadableCustomAnalyzer updatedReloadableAnalyzer = (ReloadableCustomAnalyzer) updatedAnalyzer.analyzer();
        TokenFilterFactory[] newTokenFilters = updatedReloadableAnalyzer.getComponents().getTokenFilters();
        assertEquals(originalTokenFilter.length, newTokenFilters.length);
        int i = 0;
        for (TokenFilterFactory tf : newTokenFilters) {
            assertEquals(originalTokenFilter[i].name(), tf.name());
            if (originalTokenFilter[i] != tf) {
                return false;
            }
            i++;
        }
        return true;
    }

    public static final class ReloadableFilterPlugin extends Plugin implements AnalysisPlugin {

        @Override
        public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
            return Collections.singletonMap("myReloadableFilter", new AnalysisProvider<TokenFilterFactory>() {

                @Override
                public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings)
                        throws IOException {
                    return new TokenFilterFactory() {

                        @Override
                        public String name() {
                            return "myReloadableFilter";
                        }

                        @Override
                        public TokenStream create(TokenStream tokenStream) {
                            return tokenStream;
                        }

                        @Override
                        public AnalysisMode getAnalysisMode() {
                            return AnalysisMode.SEARCH_TIME;
                        }
                    };
                }
            });
        }
    }

}
@@ -17,10 +17,11 @@
 * under the License.
 */

-package org.elasticsearch.action.support.broadcast;
+package org.elasticsearch.test;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -56,13 +56,16 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.snapshots.SourceOnlySnapshotRepository;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction;
+import org.elasticsearch.xpack.core.action.TransportReloadAnalyzersAction;
import org.elasticsearch.xpack.core.action.TransportXPackInfoAction;
import org.elasticsearch.xpack.core.action.TransportXPackUsageAction;
import org.elasticsearch.xpack.core.action.XPackInfoAction;
import org.elasticsearch.xpack.core.action.XPackUsageAction;
import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.rest.action.RestFreezeIndexAction;
+import org.elasticsearch.xpack.core.rest.action.RestReloadAnalyzersAction;
import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction;
import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction;
import org.elasticsearch.xpack.core.security.authc.TokenMetaData;

@@ -272,6 +275,7 @@ public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin,
        actions.add(new ActionHandler<>(TransportFreezeIndexAction.FreezeIndexAction.INSTANCE,
            TransportFreezeIndexAction.class));
        actions.addAll(licensing.getActions());
+       actions.add(new ActionHandler<>(ReloadAnalyzerAction.INSTANCE, TransportReloadAnalyzersAction.class));
        return actions;
    }

@@ -298,6 +302,7 @@ public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin,
        handlers.add(new RestXPackInfoAction(settings, restController));
        handlers.add(new RestXPackUsageAction(settings, restController));
        handlers.add(new RestFreezeIndexAction(settings, restController));
+       handlers.add(new RestReloadAnalyzersAction(settings, restController));
        handlers.addAll(licensing.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter,
            indexNameExpressionResolver, nodesInCluster));
        return handlers;
@@ -0,0 +1,23 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.action;

import org.elasticsearch.action.Action;

public class ReloadAnalyzerAction extends Action<ReloadAnalyzersResponse> {

    public static final ReloadAnalyzerAction INSTANCE = new ReloadAnalyzerAction();
    public static final String NAME = "indices:admin/reload_analyzers";

    private ReloadAnalyzerAction() {
        super(NAME);
    }

    @Override
    public ReloadAnalyzersResponse newResponse() {
        return new ReloadAnalyzersResponse();
    }
}
@@ -0,0 +1,43 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.action;

import org.elasticsearch.action.support.broadcast.BroadcastRequest;

import java.util.Arrays;
import java.util.Objects;

/**
 * Request for reloading index search analyzers
 */
public class ReloadAnalyzersRequest extends BroadcastRequest<ReloadAnalyzersRequest> {

    /**
     * Constructs a new request for reloading index search analyzers for one or more indices
     */
    public ReloadAnalyzersRequest(String... indices) {
        super(indices);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        ReloadAnalyzersRequest that = (ReloadAnalyzersRequest) o;
        return Objects.equals(indicesOptions(), that.indicesOptions())
                && Arrays.equals(indices, that.indices);
    }

    @Override
    public int hashCode() {
        return Objects.hash(indicesOptions(), Arrays.hashCode(indices));
    }

}
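Since ReloadAnalyzersRequest is a plain BroadcastRequest, driving the action from Java is a one-liner once a Client is available. A minimal sketch (the wrapping class and "my_index" are placeholders, not part of this commit; the execute call mirrors how the tests below invoke the action):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersRequest;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse;

class ReloadRequestSketch {
    static ReloadAnalyzersResponse reload(Client client) {
        // "my_index" is a placeholder; wildcards resolve as in any broadcast request
        ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("my_index");
        // optional: control how wildcards and missing indices are resolved
        request.indicesOptions(IndicesOptions.lenientExpandOpen());
        return client.execute(ReloadAnalyzerAction.INSTANCE, request).actionGet();
    }
}
--------------------------------------------------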

@@ -0,0 +1,89 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.action;

import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

/**
 * The response object that will be returned when reloading analyzers
 */
public class ReloadAnalyzersResponse extends BroadcastResponse {

    private final Map<String, List<String>> reloadedIndicesNodes;

    public ReloadAnalyzersResponse() {
        reloadedIndicesNodes = Collections.emptyMap();
    }

    public ReloadAnalyzersResponse(int totalShards, int successfulShards, int failedShards,
            List<DefaultShardOperationFailedException> shardFailures, Map<String, List<String>> reloadedIndicesNodes) {
        super(totalShards, successfulShards, failedShards, shardFailures);
        this.reloadedIndicesNodes = reloadedIndicesNodes;
    }

    /**
     * Override in subclass to add custom fields following the common `_shards` field
     */
    @Override
    protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
        builder.startArray("reloaded_nodes");
        for (Entry<String, List<String>> indexNodesReloaded : reloadedIndicesNodes.entrySet()) {
            builder.startObject();
            builder.field("index", indexNodesReloaded.getKey());
            builder.field("reloaded_node_ids", indexNodesReloaded.getValue());
            builder.endObject();
        }
        builder.endArray();
    }

    @SuppressWarnings({ "unchecked" })
    private static final ConstructingObjectParser<ReloadAnalyzersResponse, Void> PARSER = new ConstructingObjectParser<>("reload_analyzer",
            true, arg -> {
                BroadcastResponse response = (BroadcastResponse) arg[0];
                List<Tuple<String, List<String>>> results = (List<Tuple<String, List<String>>>) arg[1];
                Map<String, List<String>> reloadedNodeIds = new HashMap<>();
                for (Tuple<String, List<String>> result : results) {
                    reloadedNodeIds.put(result.v1(), result.v2());
                }
                return new ReloadAnalyzersResponse(response.getTotalShards(), response.getSuccessfulShards(), response.getFailedShards(),
                        Arrays.asList(response.getShardFailures()), reloadedNodeIds);
            });

    @SuppressWarnings({ "unchecked" })
    private static final ConstructingObjectParser<Tuple<String, List<String>>, Void> ENTRY_PARSER = new ConstructingObjectParser<>(
            "reload_analyzer.entry", true, arg -> {
                String index = (String) arg[0];
                List<String> nodeIds = (List<String>) arg[1];
                return new Tuple<>(index, nodeIds);
            });

    static {
        declareBroadcastFields(PARSER);
        PARSER.declareObjectArray(constructorArg(), ENTRY_PARSER, new ParseField("reloaded_nodes"));
        ENTRY_PARSER.declareString(constructorArg(), new ParseField("index"));
        ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_node_ids"));
    }

    public static ReloadAnalyzersResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}
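Serialized through addCustomXContentFields, a response renders as the standard `_shards` header followed by a `reloaded_nodes` array. A minimal sketch of producing that shape (shard counts and names are illustrative values), matching the assertion in ReloadAnalyzersResponseTests further down:

[source,java]
--------------------------------------------------
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse;

class ResponseShapeSketch {
    static String render() {
        Map<String, List<String>> reloaded =
                Collections.singletonMap("index", Collections.singletonList("nodeId"));
        ReloadAnalyzersResponse response =
                new ReloadAnalyzersResponse(10, 10, 0, Collections.emptyList(), reloaded);
        // yields: {"_shards":{"total":10,"successful":10,"failed":0},
        //          "reloaded_nodes":[{"index":"index","reloaded_node_ids":["nodeId"]}]}
        return Strings.toString(response);
    }
}
--------------------------------------------------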

@@ -0,0 +1,155 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.action;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.PlainShardsIterator;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.action.TransportReloadAnalyzersAction.ReloadResult;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Transport action that reloads the search-time analyzers of the requested indices on every node that holds a shard of them.
 */
public class TransportReloadAnalyzersAction
        extends TransportBroadcastByNodeAction<ReloadAnalyzersRequest, ReloadAnalyzersResponse, ReloadResult> {

    private static final Logger logger = LogManager.getLogger(TransportReloadAnalyzersAction.class);
    private final IndicesService indicesService;

    @Inject
    public TransportReloadAnalyzersAction(ClusterService clusterService, TransportService transportService, IndicesService indicesService,
            ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(ReloadAnalyzerAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                ReloadAnalyzersRequest::new, ThreadPool.Names.MANAGEMENT, false);
        this.indicesService = indicesService;
    }

    @Override
    protected ReloadResult readShardResult(StreamInput in) throws IOException {
        ReloadResult reloadResult = new ReloadResult();
        reloadResult.readFrom(in);
        return reloadResult;
    }

    @Override
    protected ReloadAnalyzersResponse newResponse(ReloadAnalyzersRequest request, int totalShards, int successfulShards, int failedShards,
            List<ReloadResult> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
        Map<String, List<String>> reloadedIndicesNodes = new HashMap<String, List<String>>();
        for (ReloadResult result : responses) {
            if (reloadedIndicesNodes.containsKey(result.index)) {
                List<String> nodes = reloadedIndicesNodes.get(result.index);
                nodes.add(result.nodeId);
            } else {
                List<String> nodes = new ArrayList<>();
                nodes.add(result.nodeId);
                reloadedIndicesNodes.put(result.index, nodes);
            }
        }
        return new ReloadAnalyzersResponse(totalShards, successfulShards, failedShards, shardFailures, reloadedIndicesNodes);
    }

    @Override
    protected ReloadAnalyzersRequest readRequestFrom(StreamInput in) throws IOException {
        final ReloadAnalyzersRequest request = new ReloadAnalyzersRequest();
        request.readFrom(in);
        return request;
    }

    @Override
    protected ReloadResult shardOperation(ReloadAnalyzersRequest request, ShardRouting shardRouting) throws IOException {
        logger.info("reloading analyzers for index shard " + shardRouting);
        IndexService indexService = indicesService.indexService(shardRouting.index());
        indexService.mapperService().reloadSearchAnalyzers(indicesService.getAnalysis());
        return new ReloadResult(shardRouting.index().getName(), shardRouting.currentNodeId());
    }

    public static final class ReloadResult implements Streamable {
        String index;
        String nodeId;

        private ReloadResult(String index, String nodeId) {
            this.index = index;
            this.nodeId = nodeId;
        }

        private ReloadResult() {
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            this.index = in.readString();
            this.nodeId = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(index);
            out.writeString(nodeId);
        }
    }

    /**
     * The reload request should go to only one shard per node the index lives on
     */
    @Override
    protected ShardsIterator shards(ClusterState clusterState, ReloadAnalyzersRequest request, String[] concreteIndices) {
        RoutingTable routingTable = clusterState.routingTable();
        List<ShardRouting> shards = new ArrayList<>();
        for (String index : concreteIndices) {
            Set<String> nodesCovered = new HashSet<>();
            IndexRoutingTable indexRoutingTable = routingTable.index(index);
            for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                for (ShardRouting shardRouting : indexShardRoutingTable) {
                    if (nodesCovered.contains(shardRouting.currentNodeId()) == false) {
                        shards.add(shardRouting);
                        nodesCovered.add(shardRouting.currentNodeId());
                    }
                }
            }
        }
        return new PlainShardsIterator(shards);
    }

    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state, ReloadAnalyzersRequest request) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, ReloadAnalyzersRequest request, String[] concreteIndices) {
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
    }
}
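The shards() override is the interesting part: instead of fanning out to every shard copy, it keeps only the first copy seen per node, so each node reloads an index's analyzers exactly once. A standalone sketch of that dedup idea (class and method names hypothetical, mapping shard ids to node ids for illustration):

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

class OneShardPerNodeSketch {
    // shardId -> nodeId; returns the shard ids to target, at most one per node
    static List<String> selectOnePerNode(Map<String, String> shardToNode) {
        Set<String> nodesCovered = new HashSet<>();
        List<String> selected = new ArrayList<>();
        for (Map.Entry<String, String> entry : shardToNode.entrySet()) {
            if (nodesCovered.add(entry.getValue())) { // add() returns false once the node is covered
                selected.add(entry.getKey());
            }
        }
        return selected;
    }
}
--------------------------------------------------

Because of this selection, the integration test further down expects getSuccessfulShards() to equal the number of data nodes holding the index rather than the index's shard count.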

@@ -0,0 +1,40 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.rest.action;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersRequest;

import java.io.IOException;

public class RestReloadAnalyzersAction extends BaseRestHandler {

    public RestReloadAnalyzersAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.GET, "/{index}/_reload_search_analyzers", this);
        controller.registerHandler(RestRequest.Method.POST, "/{index}/_reload_search_analyzers", this);
    }

    @Override
    public String getName() {
        return "reload_search_analyzers_action";
    }

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        ReloadAnalyzersRequest reloadAnalyzersRequest = new ReloadAnalyzersRequest(
                Strings.splitStringByCommaToArray(request.param("index")));
        reloadAnalyzersRequest.indicesOptions(IndicesOptions.fromRequest(request, reloadAnalyzersRequest.indicesOptions()));
        return channel -> client.execute(ReloadAnalyzerAction.INSTANCE, reloadAnalyzersRequest, new RestToXContentListener<>(channel));
    }
}
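With the handler registered on both GET and POST, the endpoint can be exercised over HTTP. A sketch using the low-level REST client (the host, port, and index name are placeholders, and the client setup is an assumption, not part of this commit):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

class ReloadOverHttpSketch {
    static void reload() throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            // POST /my_index/_reload_search_analyzers
            Response response = client.performRequest(
                    new Request("POST", "/my_index/_reload_search_analyzers"));
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------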

@@ -0,0 +1,51 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.action;

import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractBroadcastResponseTestCase;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ReloadAnalyzersResponseTests extends AbstractBroadcastResponseTestCase<ReloadAnalyzersResponse> {

    @Override
    protected ReloadAnalyzersResponse createTestInstance(int totalShards, int successfulShards, int failedShards,
            List<DefaultShardOperationFailedException> failures) {
        Map<String, List<String>> reloadedIndicesNodes = new HashMap<>();
        int randomIndices = randomIntBetween(0, 5);
        for (int i = 0; i < randomIndices; i++) {
            List<String> randomNodeIds = Arrays.asList(generateRandomStringArray(5, 5, false, true));
            reloadedIndicesNodes.put(randomAlphaOfLengthBetween(5, 10), randomNodeIds);
        }
        return new ReloadAnalyzersResponse(totalShards, successfulShards, failedShards, failures, reloadedIndicesNodes);
    }

    @Override
    protected ReloadAnalyzersResponse doParseInstance(XContentParser parser) throws IOException {
        return ReloadAnalyzersResponse.fromXContent(parser);
    }

    @Override
    public void testToXContent() {
        Map<String, List<String>> reloadedIndicesNodes = Collections.singletonMap("index", Collections.singletonList("nodeId"));
        ReloadAnalyzersResponse response = new ReloadAnalyzersResponse(10, 5, 5, null, reloadedIndicesNodes);
        String output = Strings.toString(response);
        assertEquals(
                "{\"_shards\":{\"total\":10,\"successful\":5,\"failed\":5},"
                        + "\"reloaded_nodes\":[{\"index\":\"index\",\"reloaded_node_ids\":[\"nodeId\"]}]"
                        + "}",
                output);
    }
}

@@ -0,0 +1,102 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.action;

import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.AnalyzeToken;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.Response;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersRequest;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

public class ReloadSynonymAnalyzerTests extends ESSingleNodeTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> getPlugins() {
        return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class);
    }

    public void testSynonymsUpdateable() throws FileNotFoundException, IOException {
        String synonymsFileName = "synonyms.txt";
        Path configDir = node().getEnvironment().configFile();
        if (Files.exists(configDir) == false) {
            Files.createDirectory(configDir);
        }
        Path synonymsFile = configDir.resolve(synonymsFileName);
        if (Files.exists(synonymsFile) == false) {
            Files.createFile(synonymsFile);
        }
        try (PrintWriter out = new PrintWriter(
                new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8))) {
            out.println("foo, baz");
        }

        assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
                .put("index.number_of_shards", 5)
                .put("index.number_of_replicas", 0)
                .put("analysis.analyzer.my_synonym_analyzer.tokenizer", "standard")
                .putList("analysis.analyzer.my_synonym_analyzer.filter", "lowercase", "my_synonym_filter")
                .put("analysis.filter.my_synonym_filter.type", "synonym")
                .put("analysis.filter.my_synonym_filter.updateable", "true")
                .put("analysis.filter.my_synonym_filter.synonyms_path", synonymsFileName))
                .addMapping("_doc", "field", "type=text,analyzer=standard,search_analyzer=my_synonym_analyzer"));

        client().prepareIndex("test", "_doc", "1").setSource("field", "Foo").get();
        assertNoFailures(client().admin().indices().prepareRefresh("test").execute().actionGet());

        SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
        assertHitCount(response, 1L);
        response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")).get();
        assertHitCount(response, 0L);
        Response analyzeResponse = client().admin().indices().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get();
        assertEquals(2, analyzeResponse.getTokens().size());
        assertEquals("foo", analyzeResponse.getTokens().get(0).getTerm());
        assertEquals("baz", analyzeResponse.getTokens().get(1).getTerm());

        // now update synonyms file and trigger reloading
        try (PrintWriter out = new PrintWriter(
                new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8))) {
            out.println("foo, baz, buzz");
        }
        assertNoFailures(client().execute(ReloadAnalyzerAction.INSTANCE, new ReloadAnalyzersRequest("test")).actionGet());

        analyzeResponse = client().admin().indices().prepareAnalyze("test", "Foo").setAnalyzer("my_synonym_analyzer").get();
        assertEquals(3, analyzeResponse.getTokens().size());
        Set<String> tokens = new HashSet<>();
        analyzeResponse.getTokens().stream().map(AnalyzeToken::getTerm).forEach(t -> tokens.add(t));
        assertTrue(tokens.contains("foo"));
        assertTrue(tokens.contains("baz"));
        assertTrue(tokens.contains("buzz"));

        response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
        assertHitCount(response, 1L);
        response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")).get();
        assertHitCount(response, 1L);
    }
}

@@ -0,0 +1,120 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.rest.action;

import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.AnalyzeToken;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.Response;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersRequest;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

public class ReloadSynonymAnalyzerIT extends ESIntegTestCase {

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build();
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class);
    }

    /**
     * This test needs to write to the config directory, which is difficult on an external cluster, so we override this to force running
     * with an {@link InternalTestCluster}
     */
    @Override
    protected boolean ignoreExternalCluster() {
        return true;
    }

    public void testSynonymsUpdateable() throws FileNotFoundException, IOException, InterruptedException {
        Path config = internalCluster().getInstance(Environment.class).configFile();
        String synonymsFileName = "synonyms.txt";
        Path synonymsFile = config.resolve(synonymsFileName);
        Files.createFile(synonymsFile);
        assertTrue(Files.exists(synonymsFile));
        try (PrintWriter out = new PrintWriter(
                new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.CREATE), StandardCharsets.UTF_8))) {
            out.println("foo, baz");
        }
        assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
                .put("index.number_of_shards", cluster().numDataNodes() * 2)
                .put("index.number_of_replicas", 1)
                .put("analysis.analyzer.my_synonym_analyzer.tokenizer", "standard")
                .put("analysis.analyzer.my_synonym_analyzer.filter", "my_synonym_filter")
                .put("analysis.filter.my_synonym_filter.type", "synonym")
                .put("analysis.filter.my_synonym_filter.updateable", "true")
                .put("analysis.filter.my_synonym_filter.synonyms_path", synonymsFileName))
                .addMapping("_doc", "field", "type=text,analyzer=standard,search_analyzer=my_synonym_analyzer"));

        client().prepareIndex("test", "_doc", "1").setSource("field", "foo").get();
        assertNoFailures(client().admin().indices().prepareRefresh("test").execute().actionGet());

        SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
        assertHitCount(response, 1L);
        response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")).get();
        assertHitCount(response, 0L);
        Response analyzeResponse = client().admin().indices().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get();
        assertEquals(2, analyzeResponse.getTokens().size());
        assertEquals("foo", analyzeResponse.getTokens().get(0).getTerm());
        assertEquals("baz", analyzeResponse.getTokens().get(1).getTerm());

        // now update synonyms file several times and trigger reloading
        for (int i = 0; i < 10; i++) {
            String testTerm = randomAlphaOfLength(10);
            try (PrintWriter out = new PrintWriter(
                    new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8))) {
                out.println("foo, baz, " + testTerm);
            }
            ReloadAnalyzersResponse reloadResponse = client().execute(ReloadAnalyzerAction.INSTANCE, new ReloadAnalyzersRequest("test"))
                    .actionGet();
            assertNoFailures(reloadResponse);
            assertEquals(cluster().numDataNodes(), reloadResponse.getSuccessfulShards());

            analyzeResponse = client().admin().indices().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get();
            assertEquals(3, analyzeResponse.getTokens().size());
            Set<String> tokens = new HashSet<>();
            analyzeResponse.getTokens().stream().map(AnalyzeToken::getTerm).forEach(t -> tokens.add(t));
            assertTrue(tokens.contains("foo"));
            assertTrue(tokens.contains("baz"));
            assertTrue(tokens.contains(testTerm));

            response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
            assertHitCount(response, 1L);
            response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", testTerm)).get();
            assertHitCount(response, 1L);
        }
    }
}

@@ -0,0 +1,33 @@
{
  "indices.reload_search_analyzers": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html",
    "stability": "experimental",
    "methods": ["GET", "POST"],
    "url": {
      "paths": ["/{index}/_reload_search_analyzers"],
      "parts": {
        "index": {
          "type": "list",
          "description" : "A comma-separated list of index names to reload analyzers for"
        }
      },
      "params": {
        "ignore_unavailable": {
          "type" : "boolean",
          "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
        },
        "allow_no_indices": {
          "type" : "boolean",
          "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
        },
        "expand_wildcards": {
          "type" : "enum",
          "options" : ["open","closed","none","all"],
          "default" : "open",
          "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
        }
      }
    },
    "body": null
  }
}
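The spec's three parameters map onto the request's IndicesOptions. A sketch of passing them over HTTP (again assuming the low-level REST client; the index pattern and parameter values are placeholders):

[source,java]
--------------------------------------------------
import org.elasticsearch.client.Request;

class ReloadParamsSketch {
    static Request buildRequest() {
        // GET would work equally well; both methods are registered
        Request request = new Request("POST", "/logs-*/_reload_search_analyzers");
        request.addParameter("ignore_unavailable", "true");
        request.addParameter("allow_no_indices", "true");
        request.addParameter("expand_wildcards", "open");
        return request;
    }
}
--------------------------------------------------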