Merge branch 'master' into index-lifecycle

Colin Goodheart-Smithe 2018-06-20 10:31:45 +01:00
commit c5b69c87c6
GPG Key ID: F975E7BDD739B3C7
184 changed files with 1154 additions and 779 deletions


@ -27,7 +27,6 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
@ -40,8 +39,8 @@ public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest,
@Inject
public TransportNoopBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NoopBulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
ActionFilters actionFilters) {
super(settings, NoopBulkAction.NAME, threadPool, transportService, actionFilters, BulkRequest::new);
}
@Override


@ -24,8 +24,8 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.SearchHit;
@ -40,10 +40,10 @@ import java.util.Collections;
public class TransportNoopSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
@Inject
public TransportNoopSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters
actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NoopSearchAction.NAME, threadPool, transportService, actionFilters, SearchRequest::new,
indexNameExpressionResolver);
public TransportNoopSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters) {
super(settings, NoopSearchAction.NAME, threadPool, transportService, actionFilters,
(Writeable.Reader<SearchRequest>) SearchRequest::new);
}
@Override


@ -35,6 +35,8 @@ include::tokenfilters/word-delimiter-tokenfilter.asciidoc[]
include::tokenfilters/word-delimiter-graph-tokenfilter.asciidoc[]
include::tokenfilters/multiplexer-tokenfilter.asciidoc[]
include::tokenfilters/stemmer-tokenfilter.asciidoc[]
include::tokenfilters/stemmer-override-tokenfilter.asciidoc[]


@ -0,0 +1,116 @@
[[analysis-multiplexer-tokenfilter]]
=== Multiplexer Token Filter

A token filter of type `multiplexer` will emit multiple tokens at the same position,
each version of the token having been run through a different filter. Identical
output tokens at the same position will be removed.

WARNING: If the incoming token stream has duplicate tokens, then these will also be
removed by the multiplexer.

[float]
=== Options
[horizontal]
filters:: a list of token filters to apply to incoming tokens. These can be any
token filters defined elsewhere in the index mappings. Filters can be chained
using a comma-delimited string, so for example `"lowercase, porter_stem"` would
apply the `lowercase` filter and then the `porter_stem` filter to a single token.

WARNING: Shingle or multi-word synonym token filters will not function normally
when they are declared in the filters array, because they read ahead internally,
which is unsupported by the multiplexer.

preserve_original:: if `true` (the default) then emit the original token in
addition to the filtered tokens.

[float]
=== Settings example

You can set it up like:

[source,js]
--------------------------------------------------
PUT /multiplexer_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : [ "my_multiplexer" ]
}
},
"filter" : {
"my_multiplexer" : {
"type" : "multiplexer",
"filters" : [ "lowercase", "lowercase, porter_stem" ]
}
}
}
}
}
--------------------------------------------------
// CONSOLE

And test it like:

[source,js]
--------------------------------------------------
POST /multiplexer_example/_analyze
{
"analyzer" : "my_analyzer",
"text" : "Going HOME"
}
--------------------------------------------------
// CONSOLE
// TEST[continued]

And it'd respond:

[source,js]
--------------------------------------------------
{
"tokens": [
{
"token": "Going",
"start_offset": 0,
"end_offset": 5,
"type": "<ALPHANUM>",
"position": 0
},
{
"token": "going",
"start_offset": 0,
"end_offset": 5,
"type": "<ALPHANUM>",
"position": 0
},
{
"token": "go",
"start_offset": 0,
"end_offset": 5,
"type": "<ALPHANUM>",
"position": 0
},
{
"token": "HOME",
"start_offset": 6,
"end_offset": 10,
"type": "<ALPHANUM>",
"position": 1
},
{
"token": "home", <1>
"start_offset": 6,
"end_offset": 10,
"type": "<ALPHANUM>",
"position": 1
}
]
}
--------------------------------------------------
// TESTRESPONSE

<1> The stemmer has also emitted a token `home` at position 1, but because it is a
duplicate of this token it has been removed from the token stream.
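The `preserve_original` option is not exercised by the example above. As an illustrative sketch only (the index name is made up and this request is not one of the change's tested snippets), disabling it drops the passthrough copies, so for the same text only the filtered tokens `going`, `go` and `home` would be returned; this is the behaviour exercised by `testMultiplexingNoOriginal` in the test file further down:

[source,js]
--------------------------------------------------
PUT /multiplexer_example_no_original
{
  "settings" : {
    "analysis" : {
      "analyzer" : {
        "my_analyzer" : {
          "tokenizer" : "standard",
          "filter" : [ "my_multiplexer" ]
        }
      },
      "filter" : {
        "my_multiplexer" : {
          "type" : "multiplexer",
          "filters" : [ "lowercase", "lowercase, porter_stem" ],
          "preserve_original" : false
        }
      }
    }
  }
}
--------------------------------------------------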


@ -184,12 +184,6 @@ is the same as `like`.
`fields`::
A list of fields to fetch and analyze the text from.
`like_text`::
The text to find documents like it.
`ids` or `docs`::
A list of documents following the same syntax as the <<docs-multi-get,Multi GET API>>.
[float]
[[mlt-query-term-selection]]
==== Term Selection Parameters


@ -226,6 +226,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
filters.put("limit", LimitTokenCountFilterFactory::new);
filters.put("lowercase", LowerCaseTokenFilterFactory::new);
filters.put("min_hash", MinHashTokenFilterFactory::new);
filters.put("multiplexer", MultiplexerTokenFilterFactory::new);
filters.put("ngram", NGramTokenFilterFactory::new);
filters.put("nGram", NGramTokenFilterFactory::new);
filters.put("pattern_capture", requriesAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new));


@ -0,0 +1,195 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter;
import org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilter;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.index.analysis.ReferringFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory implements ReferringFilterFactory {
private List<TokenFilterFactory> filters;
private List<String> filterNames;
private final boolean preserveOriginal;
private static final TokenFilterFactory IDENTITY_FACTORY = new TokenFilterFactory() {
@Override
public String name() {
return "identity";
}
@Override
public TokenStream create(TokenStream tokenStream) {
return tokenStream;
}
};
public MultiplexerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException {
super(indexSettings, name, settings);
this.filterNames = settings.getAsList("filters");
this.preserveOriginal = settings.getAsBoolean("preserve_original", true);
}
@Override
public TokenStream create(TokenStream tokenStream) {
List<Function<TokenStream, TokenStream>> functions = new ArrayList<>();
for (TokenFilterFactory tff : filters) {
functions.add(tff::create);
}
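// RemoveDuplicatesTokenFilter drops identical tokens that end up at the same position,
// e.g. when two of the configured filter chains produce the same output for a token.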
return new RemoveDuplicatesTokenFilter(new MultiplexTokenFilter(tokenStream, functions));
}
@Override
public void setReferences(Map<String, TokenFilterFactory> factories) {
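// Called once all token filter factories for the index have been built (see
// ReferringFilterFactory), so the multiplexer can refer to filters declared anywhere
// in the analysis settings. The identity factory preserves the unfiltered token
// when preserve_original is enabled.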
filters = new ArrayList<>();
if (preserveOriginal) {
filters.add(IDENTITY_FACTORY);
}
for (String filter : filterNames) {
String[] parts = Strings.tokenizeToStringArray(filter, ",");
if (parts.length == 1) {
filters.add(resolveFilterFactory(factories, parts[0]));
} else {
List<TokenFilterFactory> chain = new ArrayList<>();
for (String subfilter : parts) {
chain.add(resolveFilterFactory(factories, subfilter));
}
filters.add(chainFilters(filter, chain));
}
}
}
private TokenFilterFactory chainFilters(String name, List<TokenFilterFactory> filters) {
return new TokenFilterFactory() {
@Override
public String name() {
return name;
}
@Override
public TokenStream create(TokenStream tokenStream) {
for (TokenFilterFactory tff : filters) {
tokenStream = tff.create(tokenStream);
}
return tokenStream;
}
};
}
private TokenFilterFactory resolveFilterFactory(Map<String, TokenFilterFactory> factories, String name) {
if (factories.containsKey(name) == false) {
throw new IllegalArgumentException("Multiplexing filter [" + name() + "] refers to undefined tokenfilter [" + name + "]");
} else {
return factories.get(name);
}
}
private final class MultiplexTokenFilter extends TokenFilter {
private final TokenStream source;
private final int filterCount;
private int selector;
/**
* Creates a MultiplexTokenFilter on the given input with a set of filters
*/
MultiplexTokenFilter(TokenStream input, List<Function<TokenStream, TokenStream>> filters) {
super(input);
TokenStream source = new MultiplexerFilter(input);
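// Wrap the multiplexing source in one ConditionalTokenFilter per output filter; each
// wrapper only applies its filter when `selector` points at its slot, so every
// re-emitted copy of a token is processed by exactly one of the configured filter chains.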
for (int i = 0; i < filters.size(); i++) {
final int slot = i;
source = new ConditionalTokenFilter(source, filters.get(i)) {
@Override
protected boolean shouldFilter() {
return slot == selector;
}
};
}
this.source = source;
this.filterCount = filters.size();
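// Start with the selector on the last slot so the first call to incrementToken()
// immediately wraps around and reads the first token from the wrapped input.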
this.selector = filterCount - 1;
}
@Override
public boolean incrementToken() throws IOException {
return source.incrementToken();
}
@Override
public void end() throws IOException {
source.end();
}
@Override
public void reset() throws IOException {
source.reset();
}
private final class MultiplexerFilter extends TokenFilter {
State state;
PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
private MultiplexerFilter(TokenStream input) {
super(input);
}
@Override
public boolean incrementToken() throws IOException {
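// Once every filter slot has seen the current token, advance the wrapped input,
// capture the new token's state and emit it for slot 0; otherwise restore the
// captured token and re-emit it at the same position (increment 0) for the next slot.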
if (selector >= filterCount - 1) {
selector = 0;
if (input.incrementToken() == false) {
return false;
}
state = captureState();
return true;
}
restoreState(state);
posIncAtt.setPositionIncrement(0);
selector++;
return true;
}
@Override
public void reset() throws IOException {
super.reset();
selector = filterCount - 1;
this.state = null;
}
}
}
}


@ -0,0 +1,106 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.analysis.common;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import java.io.IOException;
import java.util.Collections;
public class MultiplexerTokenFilterTests extends ESTokenStreamTestCase {
public void testMultiplexingFilter() throws IOException {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put("index.analysis.filter.t.type", "truncate")
.put("index.analysis.filter.t.length", "2")
.put("index.analysis.filter.multiplexFilter.type", "multiplexer")
.putList("index.analysis.filter.multiplexFilter.filters", "lowercase, t", "uppercase")
.put("index.analysis.analyzer.myAnalyzer.type", "custom")
.put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard")
.putList("index.analysis.analyzer.myAnalyzer.filter", "multiplexFilter")
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings),
Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings);
try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) {
assertNotNull(analyzer);
assertAnalyzesTo(analyzer, "ONe tHree", new String[]{
"ONe", "on", "ONE", "tHree", "th", "THREE"
}, new int[]{
1, 0, 0, 1, 0, 0
});
// Duplicates are removed
assertAnalyzesTo(analyzer, "ONe THREE", new String[]{
"ONe", "on", "ONE", "THREE", "th"
}, new int[]{
1, 0, 0, 1, 0, 0
});
}
}
public void testMultiplexingNoOriginal() throws IOException {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put("index.analysis.filter.t.type", "truncate")
.put("index.analysis.filter.t.length", "2")
.put("index.analysis.filter.multiplexFilter.type", "multiplexer")
.put("index.analysis.filter.multiplexFilter.preserve_original", "false")
.putList("index.analysis.filter.multiplexFilter.filters", "lowercase, t", "uppercase")
.put("index.analysis.analyzer.myAnalyzer.type", "custom")
.put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard")
.putList("index.analysis.analyzer.myAnalyzer.filter", "multiplexFilter")
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings),
Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings);
try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) {
assertNotNull(analyzer);
assertAnalyzesTo(analyzer, "ONe tHree", new String[]{
"on", "ONE", "th", "THREE"
}, new int[]{
1, 0, 1, 0,
});
}
}
}


@ -28,7 +28,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -116,9 +115,8 @@ public class GrokProcessorGetAction extends Action<GrokProcessorGetAction.Respon
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, Request::new);
ActionFilters actionFilters) {
super(settings, NAME, threadPool, transportService, actionFilters, Request::new);
}
@Override


@ -26,7 +26,6 @@ import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.TransportMultiSearchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -47,11 +46,9 @@ public class TransportMultiSearchTemplateAction extends HandledTransportAction<M
@Inject
public TransportMultiSearchTemplateAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver resolver,
ScriptService scriptService, NamedXContentRegistry xContentRegistry,
TransportMultiSearchAction multiSearchAction) {
super(settings, MultiSearchTemplateAction.NAME, threadPool, transportService, actionFilters, resolver,
MultiSearchTemplateRequest::new);
ActionFilters actionFilters, ScriptService scriptService,
NamedXContentRegistry xContentRegistry, TransportMultiSearchAction multiSearchAction) {
super(settings, MultiSearchTemplateAction.NAME, threadPool, transportService, actionFilters, MultiSearchTemplateRequest::new);
this.scriptService = scriptService;
this.xContentRegistry = xContentRegistry;
this.multiSearchAction = multiSearchAction;


@ -25,7 +25,6 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -44,6 +43,7 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collections;
import java.util.function.Supplier;
public class TransportSearchTemplateAction extends HandledTransportAction<SearchTemplateRequest, SearchTemplateResponse> {
@ -55,11 +55,12 @@ public class TransportSearchTemplateAction extends HandledTransportAction<Search
@Inject
public TransportSearchTemplateAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver resolver,
ActionFilters actionFilters,
ScriptService scriptService,
TransportSearchAction searchAction,
NamedXContentRegistry xContentRegistry) {
super(settings, SearchTemplateAction.NAME, threadPool, transportService, actionFilters, resolver, SearchTemplateRequest::new);
super(settings, SearchTemplateAction.NAME, threadPool, transportService, actionFilters,
(Supplier<SearchTemplateRequest>) SearchTemplateRequest::new);
this.scriptService = scriptService;
this.searchAction = searchAction;
this.xContentRegistry = xContentRegistry;


@ -28,7 +28,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -282,9 +281,8 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ScriptService scriptService) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
ActionFilters actionFilters, ScriptService scriptService) {
super(settings, NAME, threadPool, transportService, actionFilters, Request::new);
this.scriptService = scriptService;
}
@Override


@ -27,9 +27,9 @@ import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -73,11 +73,11 @@ public class TransportRankEvalAction extends HandledTransportAction<RankEvalRequ
private final NamedXContentRegistry namedXContentRegistry;
@Inject
public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService,
ScriptService scriptService, NamedXContentRegistry namedXContentRegistry) {
super(settings, RankEvalAction.NAME, threadPool, transportService, actionFilters, RankEvalRequest::new,
indexNameExpressionResolver);
public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
TransportService transportService, ScriptService scriptService,
NamedXContentRegistry namedXContentRegistry) {
super(settings, RankEvalAction.NAME, threadPool, transportService, actionFilters,
(Writeable.Reader<RankEvalRequest>) RankEvalRequest::new);
this.scriptService = scriptService;
this.namedXContentRegistry = namedXContentRegistry;
this.client = client;


@ -19,13 +19,14 @@
package org.elasticsearch.index.reindex;
import java.util.function.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -40,10 +41,10 @@ public class TransportDeleteByQueryAction extends HandledTransportAction<DeleteB
private final ClusterService clusterService;
@Inject
public TransportDeleteByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver resolver, Client client, TransportService transportService,
ScriptService scriptService, ClusterService clusterService) {
super(settings, DeleteByQueryAction.NAME, threadPool, transportService, actionFilters, resolver, DeleteByQueryRequest::new);
public TransportDeleteByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
TransportService transportService, ScriptService scriptService, ClusterService clusterService) {
super(settings, DeleteByQueryAction.NAME, threadPool, transportService, actionFilters,
(Supplier<DeleteByQueryRequest>) DeleteByQueryRequest::new);
this.client = client;
this.scriptService = scriptService;
this.clusterService = clusterService;


@ -97,18 +97,20 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
private final AutoCreateIndex autoCreateIndex;
private final Client client;
private final CharacterRunAutomaton remoteWhitelist;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService,
AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) {
super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
ReindexRequest::new);
super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters,
ReindexRequest::new);
this.clusterService = clusterService;
this.scriptService = scriptService;
this.autoCreateIndex = autoCreateIndex;
this.client = client;
remoteWhitelist = buildRemoteWhitelist(REMOTE_CLUSTER_WHITELIST.get(settings));
this.indexNameExpressionResolver = indexNameExpressionResolver;
}
@Override


@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -45,10 +44,9 @@ public class TransportRethrottleAction extends TransportTasksAction<BulkByScroll
@Inject
public TransportRethrottleAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Client client) {
super(settings, RethrottleAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
RethrottleRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
TransportService transportService, ActionFilters actionFilters, Client client) {
super(settings, RethrottleAction.NAME, threadPool, clusterService, transportService, actionFilters,
RethrottleRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
this.client = client;
}


@ -27,7 +27,6 @@ import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -44,6 +43,7 @@ import org.elasticsearch.transport.TransportService;
import java.util.Map;
import java.util.function.BiFunction;
import java.util.function.Supplier;
public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateByQueryRequest, BulkByScrollResponse> {
private final Client client;
@ -51,11 +51,10 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
private final ClusterService clusterService;
@Inject
public TransportUpdateByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService,
ScriptService scriptService, ClusterService clusterService) {
public TransportUpdateByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
TransportService transportService, ScriptService scriptService, ClusterService clusterService) {
super(settings, UpdateByQueryAction.NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, UpdateByQueryRequest::new);
(Supplier<UpdateByQueryRequest>) UpdateByQueryRequest::new);
this.client = client;
this.scriptService = scriptService;
this.clusterService = clusterService;


@ -24,7 +24,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -43,11 +42,10 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHo
NodeHotThreads> {
@Inject
public TransportNodesHotThreadsAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
public TransportNodesHotThreadsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters) {
super(settings, NodesHotThreadsAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, NodesHotThreadsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeHotThreads.class);
NodesHotThreadsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeHotThreads.class);
}
@Override


@ -23,7 +23,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -44,12 +43,10 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
private final NodeService nodeService;
@Inject
public TransportNodesInfoAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
NodeService nodeService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
public TransportNodesInfoAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, NodeService nodeService, ActionFilters actionFilters) {
super(settings, NodesInfoAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, NodesInfoRequest::new, NodeInfoRequest::new, ThreadPool.Names.MANAGEMENT, NodeInfo.class);
NodesInfoRequest::new, NodeInfoRequest::new, ThreadPool.Names.MANAGEMENT, NodeInfo.class);
this.nodeService = nodeService;
}


@ -26,7 +26,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -55,11 +54,11 @@ public class TransportNodesReloadSecureSettingsAction extends TransportNodesActi
@Inject
public TransportNodesReloadSecureSettingsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Environment environment,
Environment environment,
PluginsService pluginService) {
super(settings, NodesReloadSecureSettingsAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, NodesReloadSecureSettingsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC,
NodesReloadSecureSettingsResponse.NodeResponse.class);
NodesReloadSecureSettingsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC,
NodesReloadSecureSettingsResponse.NodeResponse.class);
this.environment = environment;
this.pluginsService = pluginService;
}


@ -23,7 +23,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -46,10 +45,9 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
@Inject
public TransportNodesStatsAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
NodeService nodeService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
NodeService nodeService, ActionFilters actionFilters) {
super(settings, NodesStatsAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, NodesStatsRequest::new, NodeStatsRequest::new, ThreadPool.Names.MANAGEMENT, NodeStats.class);
NodesStatsRequest::new, NodeStatsRequest::new, ThreadPool.Names.MANAGEMENT, NodeStats.class);
this.nodeService = nodeService;
}


@ -26,7 +26,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
@ -64,11 +63,9 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Inject
public TransportCancelTasksAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver
indexNameExpressionResolver) {
TransportService transportService, ActionFilters actionFilters) {
super(settings, CancelTasksAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, CancelTasksRequest::new, CancelTasksResponse::new,
ThreadPool.Names.MANAGEMENT);
CancelTasksRequest::new, CancelTasksResponse::new, ThreadPool.Names.MANAGEMENT);
transportService.registerRequestHandler(BAN_PARENT_ACTION_NAME, BanParentTaskRequest::new, ThreadPool.Names.SAME, new
BanParentRequestHandler());
}


@ -28,7 +28,6 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@ -72,9 +71,8 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
@Inject
public TransportGetTaskAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, Client client,
NamedXContentRegistry xContentRegistry) {
super(settings, GetTaskAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, GetTaskRequest::new);
ClusterService clusterService, Client client, NamedXContentRegistry xContentRegistry) {
super(settings, GetTaskAction.NAME, threadPool, transportService, actionFilters, GetTaskRequest::new);
this.clusterService = clusterService;
this.transportService = transportService;
this.client = client;


@ -24,7 +24,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -53,9 +52,9 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
@Inject
public TransportListTasksAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ListTasksAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
ListTasksRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
TransportService transportService, ActionFilters actionFilters) {
super(settings, ListTasksAction.NAME, threadPool, clusterService, transportService, actionFilters,
ListTasksRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
}
@Override


@ -23,7 +23,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -43,10 +42,9 @@ public class TransportNodesUsageAction
@Inject
public TransportNodesUsageAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, UsageService usageService) {
super(settings, NodesUsageAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
NodesUsageRequest::new, NodeUsageRequest::new, ThreadPool.Names.MANAGEMENT, NodeUsage.class);
TransportService transportService, ActionFilters actionFilters, UsageService usageService) {
super(settings, NodesUsageAction.NAME, threadPool, clusterService, transportService, actionFilters,
NodesUsageRequest::new, NodeUsageRequest::new, ThreadPool.Names.MANAGEMENT, NodeUsage.class);
this.usageService = usageService;
}


@ -19,12 +19,13 @@
package org.elasticsearch.action.admin.cluster.remote;
import java.util.function.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.transport.RemoteClusterService;
import org.elasticsearch.action.search.SearchTransportService;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
@ -38,10 +39,9 @@ public final class TransportRemoteInfoAction extends HandledTransportAction<Remo
@Inject
public TransportRemoteInfoAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
SearchTransportService searchTransportService) {
super(settings, RemoteInfoAction.NAME, threadPool, transportService, actionFilters, RemoteInfoRequest::new,
indexNameExpressionResolver);
ActionFilters actionFilters, SearchTransportService searchTransportService) {
super(settings, RemoteInfoAction.NAME, threadPool, transportService, actionFilters,
(Supplier<RemoteInfoRequest>) RemoteInfoRequest::new);
this.remoteClusterService = searchTransportService.getRemoteClusterService();
}


@ -28,7 +28,6 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
@ -63,12 +62,11 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
private final SnapshotShardsService snapshotShardsService;
@Inject
public TransportNodesSnapshotsStatus(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
SnapshotShardsService snapshotShardsService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
Request::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeSnapshotStatus.class);
public TransportNodesSnapshotsStatus(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, SnapshotShardsService snapshotShardsService,
ActionFilters actionFilters) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
Request::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeSnapshotStatus.class);
this.snapshotShardsService = snapshotShardsService;
}


@ -30,7 +30,6 @@ import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -58,13 +57,11 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
@Inject
public TransportClusterStatsAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
NodeService nodeService, IndicesService indicesService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
public TransportClusterStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, NodeService nodeService, IndicesService indicesService,
ActionFilters actionFilters) {
super(settings, ClusterStatsAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, ClusterStatsRequest::new, ClusterStatsNodeRequest::new, ThreadPool.Names.MANAGEMENT,
ClusterStatsNodeResponse.class);
ClusterStatsRequest::new, ClusterStatsNodeRequest::new, ThreadPool.Names.MANAGEMENT, ClusterStatsNodeResponse.class);
this.nodeService = nodeService;
this.indicesService = indicesService;
}


@ -19,10 +19,11 @@
package org.elasticsearch.action.admin.indices.flush;
import java.util.function.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.flush.SyncedFlushService;
@ -37,11 +38,10 @@ public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlu
SyncedFlushService syncedFlushService;
@Inject
public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SyncedFlushService syncedFlushService) {
super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new);
public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, SyncedFlushService syncedFlushService) {
super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters,
(Supplier<SyncedFlushRequest>) SyncedFlushRequest::new);
this.syncedFlushService = syncedFlushService;
}


@ -41,14 +41,16 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
private final ClusterService clusterService;
private final TransportGetFieldMappingsIndexAction shardAction;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportGetFieldMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, TransportGetFieldMappingsIndexAction shardAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, GetFieldMappingsAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, GetFieldMappingsRequest::new);
super(settings, GetFieldMappingsAction.NAME, threadPool, transportService, actionFilters, GetFieldMappingsRequest::new);
this.clusterService = clusterService;
this.shardAction = shardAction;
this.indexNameExpressionResolver = indexNameExpressionResolver;
}
@Override


@ -91,6 +91,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
private final TransportCreateIndexAction createIndexAction;
private final LongSupplier relativeTimeProvider;
private final IngestActionForwarder ingestForwarder;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
@ -110,7 +111,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, BulkRequest::new);
Objects.requireNonNull(relativeTimeProvider);
this.clusterService = clusterService;
this.ingestService = ingestService;
@ -119,6 +120,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
this.autoCreateIndex = autoCreateIndex;
this.relativeTimeProvider = relativeTimeProvider;
this.ingestForwarder = new IngestActionForwarder(transportService);
this.indexNameExpressionResolver = indexNameExpressionResolver;
clusterService.addStateApplier(this.ingestForwarder);
}


@ -46,19 +46,18 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
private final ClusterService clusterService;
private final TransportFieldCapabilitiesIndexAction shardAction;
private final RemoteClusterService remoteClusterService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportFieldCapabilitiesAction(Settings settings, TransportService transportService,
ClusterService clusterService, ThreadPool threadPool,
TransportFieldCapabilitiesIndexAction shardAction,
ActionFilters actionFilters,
IndexNameExpressionResolver
indexNameExpressionResolver) {
super(settings, FieldCapabilitiesAction.NAME, threadPool, transportService,
actionFilters, indexNameExpressionResolver, FieldCapabilitiesRequest::new);
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, FieldCapabilitiesAction.NAME, threadPool, transportService, actionFilters, FieldCapabilitiesRequest::new);
this.clusterService = clusterService;
this.remoteClusterService = transportService.getRemoteClusterService();
this.shardAction = shardAction;
this.indexNameExpressionResolver = indexNameExpressionResolver;
}
@Override


@ -40,16 +40,17 @@ import java.util.concurrent.atomic.AtomicInteger;
public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequest, MultiGetResponse> {
private final ClusterService clusterService;
private final TransportShardMultiGetAction shardAction;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportMultiGetAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, TransportShardMultiGetAction shardAction,
ActionFilters actionFilters, IndexNameExpressionResolver resolver) {
super(settings, MultiGetAction.NAME, threadPool, transportService, actionFilters, resolver, MultiGetRequest::new);
super(settings, MultiGetAction.NAME, threadPool, transportService, actionFilters, MultiGetRequest::new);
this.clusterService = clusterService;
this.shardAction = shardAction;
this.indexNameExpressionResolver = resolver;
}
@Override


@ -22,8 +22,8 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.PipelineStore;
@ -39,8 +39,10 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<Simu
private final SimulateExecutionService executionService;
@Inject
public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
super(settings, SimulatePipelineAction.NAME, threadPool, transportService, actionFilters, SimulatePipelineRequest::new, indexNameExpressionResolver);
public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, NodeService nodeService) {
super(settings, SimulatePipelineAction.NAME, threadPool, transportService, actionFilters,
(Writeable.Reader<SimulatePipelineRequest>) SimulatePipelineRequest::new);
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
this.executionService = new SimulateExecutionService(threadPool);
}


@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -40,9 +39,8 @@ public class TransportMainAction extends HandledTransportAction<MainRequest, Mai
@Inject
public TransportMainAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService) {
super(settings, MainAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MainRequest::new);
ActionFilters actionFilters, ClusterService clusterService) {
super(settings, MainAction.NAME, threadPool, transportService, actionFilters, MainRequest::new);
this.clusterService = clusterService;
}


@ -22,7 +22,6 @@ package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -37,9 +36,8 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
@Inject
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchTransportService searchTransportService) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters,
ClearScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;


@ -25,7 +25,6 @@ import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -49,9 +48,8 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
@Inject
public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, TransportSearchAction searchAction,
ActionFilters actionFilters, IndexNameExpressionResolver resolver) {
super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, resolver, MultiSearchRequest::new);
ClusterService clusterService, TransportSearchAction searchAction, ActionFilters actionFilters) {
super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, MultiSearchRequest::new);
this.clusterService = clusterService;
this.searchAction = searchAction;
this.availableProcessors = EsExecutors.numberOfProcessors(settings);
@ -60,8 +58,8 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
TransportMultiSearchAction(ThreadPool threadPool, ActionFilters actionFilters, TransportService transportService,
ClusterService clusterService, TransportAction<SearchRequest, SearchResponse> searchAction,
IndexNameExpressionResolver resolver, int availableProcessors, LongSupplier relativeTimeProvider) {
super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, resolver, MultiSearchRequest::new);
int availableProcessors, LongSupplier relativeTimeProvider) {
super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, MultiSearchRequest::new);
this.clusterService = clusterService;
this.searchAction = searchAction;
this.availableProcessors = availableProcessors;


@ -34,6 +34,7 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -74,19 +75,22 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private final RemoteClusterService remoteClusterService;
private final SearchPhaseController searchPhaseController;
private final SearchService searchService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, SearchService searchService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, SearchRequest::new, indexNameExpressionResolver);
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters,
(Writeable.Reader<SearchRequest>) SearchRequest::new);
this.searchPhaseController = searchPhaseController;
this.searchTransportService = searchTransportService;
this.remoteClusterService = searchTransportService.getRemoteClusterService();
SearchTransportService.registerRequestHandler(transportService, searchService);
this.clusterService = clusterService;
this.searchService = searchService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
}
private Map<String, AliasFilter> buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState,

View File

@ -22,9 +22,9 @@ package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@ -43,10 +43,9 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
@Inject
public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController) {
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, SearchScrollRequest::new,
indexNameExpressionResolver);
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters,
(Writeable.Reader<SearchScrollRequest>) SearchScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;
this.searchPhaseController = searchPhaseController;

View File

@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
@ -39,29 +38,28 @@ import java.util.function.Supplier;
public abstract class HandledTransportAction<Request extends ActionRequest, Response extends ActionResponse>
extends TransportAction<Request, Response> {
protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ActionFilters actionFilters,
Supplier<Request> request) {
this(settings, actionName, true, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
this(settings, actionName, true, threadPool, transportService, actionFilters, request);
}
protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, Writeable.Reader<Request> requestReader,
IndexNameExpressionResolver indexNameExpressionResolver) {
this(settings, actionName, true, threadPool, transportService, actionFilters, requestReader, indexNameExpressionResolver);
ActionFilters actionFilters, Writeable.Reader<Request> requestReader) {
this(settings, actionName, true, threadPool, transportService, actionFilters, requestReader);
}
protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
Supplier<Request> request) {
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, false, canTripCircuitBreaker,
new TransportHandler());
}
protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
Writeable.Reader<Request> requestReader, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
Writeable.Reader<Request> requestReader) {
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, false, canTripCircuitBreaker, requestReader,
new TransportHandler());
}

View File

@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
@ -39,16 +38,14 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
protected final ThreadPool threadPool;
protected final String actionName;
private final ActionFilter[] filters;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
protected final TaskManager taskManager;
protected TransportAction(Settings settings, String actionName, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, TaskManager taskManager) {
TaskManager taskManager) {
super(settings);
this.threadPool = threadPool;
this.actionName = actionName;
this.filters = actionFilters.filters();
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.taskManager = taskManager;
}

View File

@ -54,15 +54,17 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
protected final ClusterService clusterService;
protected final TransportService transportService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
final String transportShardAction;
protected TransportBroadcastAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<Request> request, Supplier<ShardRequest> shardRequest, String shardExecutor) {
super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
super(settings, actionName, threadPool, transportService, actionFilters, request);
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.transportShardAction = actionName + "[s]";
transportService.registerRequestHandler(transportShardAction, shardRequest, shardExecutor, new ShardTransportHandler());

View File

@ -81,6 +81,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
private final ClusterService clusterService;
private final TransportService transportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
final String transportNodeBroadcastAction;
@ -109,11 +110,12 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
Supplier<Request> request,
String executor,
boolean canTripCircuitBreaker) {
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, indexNameExpressionResolver,
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters,
request);
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
transportNodeBroadcastAction = actionName + "[n]";

View File

@ -56,6 +56,7 @@ import java.util.function.Supplier;
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
protected final TransportService transportService;
protected final ClusterService clusterService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
final String executor;
@ -74,10 +75,11 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
protected TransportMasterNodeAction(Settings settings, String actionName, boolean canTripCircuitBreaker,
TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, indexNameExpressionResolver,
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters,
request);
this.transportService = transportService;
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.executor = executor();
}
@ -85,10 +87,11 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, Writeable.Reader<Request> request,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, request,
indexNameExpressionResolver);
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, request);
this.transportService = transportService;
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.executor = executor();
}

View File

@ -26,7 +26,6 @@ import org.elasticsearch.action.NoSuchNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
@ -63,11 +62,9 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
protected TransportNodesAction(Settings settings, String actionName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<NodesRequest> request, Supplier<NodeRequest> nodeRequest,
String nodeExecutor,
Supplier<NodesRequest> request, Supplier<NodeRequest> nodeRequest, String nodeExecutor,
Class<NodeResponse> nodeResponseClass) {
super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
super(settings, actionName, threadPool, transportService, actionFilters, request);
this.clusterService = Objects.requireNonNull(clusterService);
this.transportService = Objects.requireNonNull(transportService);
this.nodeResponseClass = Objects.requireNonNull(nodeResponseClass);

View File

@ -56,13 +56,15 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
private final TransportReplicationAction replicatedBroadcastShardAction;
private final ClusterService clusterService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
public TransportBroadcastReplicationAction(String name, Supplier<Request> request, Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) {
super(settings, name, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
super(settings, name, threadPool, transportService, actionFilters, request);
this.replicatedBroadcastShardAction = replicatedBroadcastShardAction;
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
}

View File

@ -104,6 +104,7 @@ public abstract class TransportReplicationAction<
protected final ClusterService clusterService;
protected final ShardStateAction shardStateAction;
protected final IndicesService indicesService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
protected final TransportRequestOptions transportOptions;
protected final String executor;
@ -131,11 +132,12 @@ public abstract class TransportReplicationAction<
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
Supplier<ReplicaRequest> replicaRequest, String executor,
boolean syncGlobalCheckpointAfterOperation) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
this.transportService = transportService;
this.clusterService = clusterService;
this.indicesService = indicesService;
this.shardStateAction = shardStateAction;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.executor = executor;
this.transportPrimaryAction = actionName + "[p]";

View File

@ -52,6 +52,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
extends HandledTransportAction<Request, Response> {
protected final ClusterService clusterService;
protected final TransportService transportService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
final String executor;
final String shardActionName;
@ -59,9 +60,10 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
protected TransportInstanceSingleOperationAction(Settings settings, String actionName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
super(settings, actionName, threadPool, transportService, actionFilters, request);
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.executor = executor();
this.shardActionName = actionName + "[s]";
transportService.registerRequestHandler(shardActionName, request, executor, new ShardTransportHandler());

View File

@ -61,8 +61,8 @@ import static org.elasticsearch.action.support.TransportActions.isShardNotAvaila
public abstract class TransportSingleShardAction<Request extends SingleShardRequest<Request>, Response extends ActionResponse> extends TransportAction<Request, Response> {
protected final ClusterService clusterService;
protected final TransportService transportService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
final String transportShardAction;
final String executor;
@ -70,9 +70,10 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
protected TransportSingleShardAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<Request> request, String executor) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.transportShardAction = actionName + "[s]";
this.executor = executor;

View File

@ -28,7 +28,6 @@ import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
@ -78,12 +77,10 @@ public abstract class TransportTasksAction<
protected final String transportNodeAction;
protected TransportTasksAction(Settings settings, String actionName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<TasksRequest> requestSupplier,
Supplier<TasksResponse> responseSupplier,
String nodeExecutor) {
super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, requestSupplier);
protected TransportTasksAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, Supplier<TasksRequest> requestSupplier,
Supplier<TasksResponse> responseSupplier, String nodeExecutor) {
super(settings, actionName, threadPool, transportService, actionFilters, requestSupplier);
this.clusterService = clusterService;
this.transportService = transportService;
this.transportNodeAction = actionName + "[n]";

View File

@ -41,16 +41,17 @@ import java.util.concurrent.atomic.AtomicInteger;
public class TransportMultiTermVectorsAction extends HandledTransportAction<MultiTermVectorsRequest, MultiTermVectorsResponse> {
private final ClusterService clusterService;
private final TransportShardMultiTermsVectorAction shardAction;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportMultiTermVectorsAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, TransportShardMultiTermsVectorAction shardAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, MultiTermVectorsAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiTermVectorsRequest::new);
super(settings, MultiTermVectorsAction.NAME, threadPool, transportService, actionFilters, MultiTermVectorsRequest::new);
this.clusterService = clusterService;
this.shardAction = shardAction;
this.indexNameExpressionResolver = indexNameExpressionResolver;
}
@Override

View File

@ -30,7 +30,6 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
@ -56,12 +55,10 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction<Tra
private final GatewayMetaState metaState;
@Inject
public TransportNodesListGatewayMetaState(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
GatewayMetaState metaState) {
public TransportNodesListGatewayMetaState(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, GatewayMetaState metaState) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, Request::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeGatewayMetaState.class);
Request::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeGatewayMetaState.class);
this.metaState = metaState;
}

View File

@ -32,7 +32,6 @@ import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@ -72,14 +71,11 @@ public class TransportNodesListGatewayStartedShards extends
private final IndicesService indicesService;
@Inject
public TransportNodesListGatewayStartedShards(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
public TransportNodesListGatewayStartedShards(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
NodeEnvironment env, IndicesService indicesService) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STARTED,
NodeGatewayStartedShards.class);
Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STARTED, NodeGatewayStartedShards.class);
this.nodeEnv = env;
this.indicesService = indicesService;
}

View File

@ -166,7 +166,18 @@ public final class AnalysisRegistry implements Closeable {
*/
tokenFilters.put("synonym", requiresAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
tokenFilters.put("synonym_graph", requiresAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings)));
return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters);
Map<String, TokenFilterFactory> mappings
= buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters);
// Referring filter factories require references to other token filters, so we pass these in
// after all factories have been registered
for (TokenFilterFactory tff : mappings.values()) {
if (tff instanceof ReferringFilterFactory) {
((ReferringFilterFactory)tff).setReferences(mappings);
}
}
return mappings;
}
public Map<String, TokenizerFactory> buildTokenizerFactories(IndexSettings indexSettings) throws IOException {

View File

@ -0,0 +1,37 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import java.util.Map;
/**
* Marks a {@link TokenFilterFactory} that refers to other filter factories.
*
* The analysis registry will call {@link #setReferences(Map)} with a map of all
* available token filter factories after all factories have been registered.
*/
public interface ReferringFilterFactory {
/**
* Called with a map of all registered filter factories
*/
void setReferences(Map<String, TokenFilterFactory> factories);
}
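
For illustration, a minimal sketch of a factory that consumes these references. The class, field, and filter names below are hypothetical and not part of this change; the sketch only assumes the existing TokenFilterFactory contract (name() and create(TokenStream)):

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.TokenStream;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * Hypothetical example: a factory configured with the names of other token filters
 * that it can only resolve once every factory has been registered.
 */
public class ExampleReferringFilterFactory implements TokenFilterFactory, ReferringFilterFactory {

    private final List<String> filterNames;           // names read from settings (assumed)
    private List<TokenFilterFactory> resolvedFilters; // populated by setReferences

    public ExampleReferringFilterFactory(List<String> filterNames) {
        this.filterNames = filterNames;
    }

    @Override
    public String name() {
        return "example_referring";
    }

    @Override
    public void setReferences(Map<String, TokenFilterFactory> factories) {
        // Called by the analysis registry after all token filter factories have been built
        resolvedFilters = new ArrayList<>();
        for (String filterName : filterNames) {
            TokenFilterFactory factory = factories.get(filterName);
            if (factory == null) {
                throw new IllegalArgumentException("Unknown token filter [" + filterName + "]");
            }
            resolvedFilters.add(factory);
        }
    }

    @Override
    public TokenStream create(TokenStream tokenStream) {
        // Apply the referenced filters in the configured order
        TokenStream stream = tokenStream;
        for (TokenFilterFactory factory : resolvedFilters) {
            stream = factory.create(stream);
        }
        return stream;
    }
}

The point of the sketch is the lifecycle: factories are constructed and registered first, setReferences(Map) is called once the full map exists, and only then can create(TokenStream) safely delegate to other filters.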

View File

@ -30,7 +30,6 @@ import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@ -74,9 +73,8 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
@Inject
public TransportNodesListShardStoreMetaData(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
IndicesService indicesService, NodeEnvironment nodeEnv, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
IndicesService indicesService, NodeEnvironment nodeEnv, ActionFilters actionFilters) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STORE, NodeStoreFilesMetaData.class);
this.indicesService = indicesService;
this.nodeEnv = nodeEnv;

View File

@ -25,6 +25,8 @@ import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.unit.TimeValue;
@ -36,7 +38,6 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MappedFieldType.Relation;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
import org.elasticsearch.search.aggregations.AggregatorFactory;
@ -59,6 +60,7 @@ import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
@ -70,6 +72,7 @@ import static java.util.Collections.unmodifiableMap;
public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, DateHistogramAggregationBuilder>
implements MultiBucketAggregationBuilder {
public static final String NAME = "date_histogram";
private static DateMathParser EPOCH_MILLIS_PARSER = new DateMathParser(Joda.forPattern("epoch_millis", Locale.ROOT));
public static final Map<String, DateTimeUnit> DATE_FIELD_UNITS;
@ -380,7 +383,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
Long anyInstant = null;
final IndexNumericFieldData fieldData = context.getForField(ft);
for (LeafReaderContext ctx : reader.leaves()) {
AtomicNumericFieldData leafFD = ((IndexNumericFieldData) fieldData).load(ctx);
AtomicNumericFieldData leafFD = fieldData.load(ctx);
SortedNumericDocValues values = leafFD.getLongValues();
if (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
anyInstant = values.nextValue();
@ -406,11 +409,8 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
// rounding rounds down, so 'nextTransition' is a good upper bound
final long high = nextTransition;
final DocValueFormat format = ft.docValueFormat(null, null);
final Object formattedLow = format.format(low);
final Object formattedHigh = format.format(high);
if (ft.isFieldWithinQuery(reader, formattedLow, formattedHigh,
true, false, tz, null, context) == Relation.WITHIN) {
if (ft.isFieldWithinQuery(reader, low, high, true, false, DateTimeZone.UTC, EPOCH_MILLIS_PARSER,
context) == Relation.WITHIN) {
// All values in this reader have the same offset despite daylight saving times.
// This is very common for location-based timezones such as Europe/Paris in
// combination with time-based indices.

View File

@ -81,7 +81,7 @@ public class ActionModuleTests extends ESTestCase {
class FakeTransportAction extends TransportAction<FakeRequest, ActionResponse> {
protected FakeTransportAction(Settings settings, String actionName, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, TaskManager taskManager) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, taskManager);
super(settings, actionName, threadPool, actionFilters, taskManager);
}
@Override

View File

@ -32,7 +32,6 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@ -148,8 +147,8 @@ public abstract class TaskManagerTestCase extends ESTestCase {
ClusterService clusterService, TransportService transportService, Supplier<NodesRequest> request,
Supplier<NodeRequest> nodeRequest) {
super(settings, actionName, threadPool, clusterService, transportService,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
request, nodeRequest, ThreadPool.Names.GENERIC, NodeResponse.class);
new ActionFilters(new HashSet<>()),
request, nodeRequest, ThreadPool.Names.GENERIC, NodeResponse.class);
}
@Override
@ -192,12 +191,10 @@ public abstract class TaskManagerTestCase extends ESTestCase {
transportService.start();
clusterService = createClusterService(threadPool, discoveryNode.get());
clusterService.addStateApplier(transportService.getTaskManager());
IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
ActionFilters actionFilters = new ActionFilters(emptySet());
transportListTasksAction = new TransportListTasksAction(settings, threadPool, clusterService, transportService,
actionFilters, indexNameExpressionResolver);
transportListTasksAction = new TransportListTasksAction(settings, threadPool, clusterService, transportService, actionFilters);
transportCancelTasksAction = new TransportCancelTasksAction(settings, threadPool, clusterService,
transportService, actionFilters, indexNameExpressionResolver);
transportService, actionFilters);
transportService.acceptIncomingRequests();
}

View File

@ -37,7 +37,6 @@ import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@ -269,8 +268,8 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin {
public TransportTestTaskAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService) {
super(settings, TestTaskAction.NAME, threadPool, clusterService, transportService,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
NodesRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeResponse.class);
new ActionFilters(new HashSet<>()),
NodesRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeResponse.class);
}
@Override
@ -429,7 +428,7 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin {
clusterService,
TransportService transportService) {
super(settings, UnblockTestTasksAction.NAME, threadPool, clusterService, transportService, new ActionFilters(new
HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
HashSet<>()),
UnblockTestTasksRequest::new, UnblockTestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
}

View File

@ -36,7 +36,6 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
@ -258,7 +257,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
protected TestTasksAction(Settings settings, String actionName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService) {
super(settings, actionName, threadPool, clusterService, transportService, new ActionFilters(new HashSet<>()),
new IndexNameExpressionResolver(Settings.EMPTY), TestTasksRequest::new, TestTasksResponse::new,
TestTasksRequest::new, TestTasksResponse::new,
ThreadPool.Names.MANAGEMENT);
}

View File

@ -26,7 +26,6 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
@ -70,7 +69,7 @@ public class MainActionTests extends ESTestCase {
TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
x -> null, null, Collections.emptySet());
TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), transportService, mock(ActionFilters.class),
mock(IndexNameExpressionResolver.class), clusterService);
clusterService);
AtomicReference<MainResponse> responseRef = new AtomicReference<>();
action.doExecute(new MainRequest(), new ActionListener<MainResponse>() {
@Override

View File

@ -149,7 +149,7 @@ public class MultiSearchActionTookTests extends ESTestCase {
final Set<SearchRequest> requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>()));
TransportAction<SearchRequest, SearchResponse> searchAction = new TransportAction<SearchRequest, SearchResponse>(Settings.EMPTY,
"action", threadPool, actionFilters, resolver, taskManager) {
"action", threadPool, actionFilters, taskManager) {
@Override
protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
requests.add(request);
@ -161,7 +161,7 @@ public class MultiSearchActionTookTests extends ESTestCase {
};
if (controlledClock) {
return new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver,
return new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction,
availableProcessors, expected::get) {
@Override
void executeSearch(final Queue<SearchRequestSlot> requests, final AtomicArray<MultiSearchResponse.Item> responses,
@ -171,7 +171,7 @@ public class MultiSearchActionTookTests extends ESTestCase {
}
};
} else {
return new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver,
return new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction,
availableProcessors, System::nanoTime) {
@Override

View File

@ -108,7 +108,7 @@ public class TransportMultiSearchActionTests extends ESTestCase {
final ExecutorService rarelyExecutor = threadPool.executor(threadPoolNames.get(1));
final Set<SearchRequest> requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>()));
TransportAction<SearchRequest, SearchResponse> searchAction = new TransportAction<SearchRequest, SearchResponse>
(Settings.EMPTY, "action", threadPool, actionFilters, resolver, taskManager) {
(Settings.EMPTY, "action", threadPool, actionFilters, taskManager) {
@Override
protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
requests.add(request);
@ -126,7 +126,7 @@ public class TransportMultiSearchActionTests extends ESTestCase {
};
TransportMultiSearchAction action =
new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, 10,
new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, 10,
System::nanoTime);
// Execute the multi search api and fail if we find an error after executing:

View File

@ -80,7 +80,7 @@ public class TransportActionFilterChainTests extends ESTestCase {
String actionName = randomAlphaOfLength(randomInt(30));
ActionFilters actionFilters = new ActionFilters(filters);
TransportAction<TestRequest, TestResponse> transportAction =
new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, null, actionFilters, null,
new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, null, actionFilters,
new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) {
@Override
protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
@ -158,7 +158,7 @@ public class TransportActionFilterChainTests extends ESTestCase {
String actionName = randomAlphaOfLength(randomInt(30));
ActionFilters actionFilters = new ActionFilters(filters);
TransportAction<TestRequest, TestResponse> transportAction = new TransportAction<TestRequest, TestResponse>(Settings.EMPTY,
actionName, null, actionFilters, null, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) {
actionName, null, actionFilters, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) {
@Override
protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
listener.onResponse(new TestResponse());

View File

@ -250,7 +250,7 @@ public class TransportNodesActionTests extends ESTestCase {
transportService, ActionFilters actionFilters, Supplier<TestNodesRequest> request,
Supplier<TestNodeRequest> nodeRequest, String nodeExecutor) {
super(settings, "indices:admin/test", threadPool, clusterService, transportService, actionFilters,
null, request, nodeRequest, nodeExecutor, TestNodeResponse.class);
request, nodeRequest, nodeExecutor, TestNodeResponse.class);
}
@Override

View File

@ -59,7 +59,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase {
private static class InternalTransportAction extends TransportAction {
private InternalTransportAction(Settings settings, String actionName, ThreadPool threadPool) {
super(settings, actionName, threadPool, EMPTY_FILTERS, null, new TaskManager(settings, threadPool, Collections.emptySet()));
super(settings, actionName, threadPool, EMPTY_FILTERS, new TaskManager(settings, threadPool, Collections.emptySet()));
}
@Override

View File

@ -20,7 +20,6 @@
package org.elasticsearch.index.analysis;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.en.EnglishAnalyzer;

View File

@ -514,8 +514,8 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P
public TransportTestTaskAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, String nodeExecutor) {
super(settings, TestTaskAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
TestTasksRequest::new, TestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
super(settings, TestTaskAction.NAME, threadPool, clusterService, transportService, actionFilters,
TestTasksRequest::new, TestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
}
@Override

View File

@ -34,6 +34,7 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
@ -41,7 +42,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
import org.elasticsearch.search.aggregations.metrics.avg.Avg;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.hamcrest.Matchers;
import org.joda.time.DateTime;
@ -1341,6 +1341,38 @@ public class DateHistogramIT extends ESIntegTestCase {
}
}
/**
* https://github.com/elastic/elasticsearch/issues/31392 demonstrates an edge case where a date field mapping with
* "format" = "epoch_millis" can lead for the date histogram aggregation to throw an error if a non-UTC time zone
* with daylight savings time is used. This test was added to check this is working now
* @throws ExecutionException
* @throws InterruptedException
*/
public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, ExecutionException {
String index = "test31392";
assertAcked(client().admin().indices().prepareCreate(index).addMapping("type", "d", "type=date,format=epoch_millis").get());
indexRandom(true, client().prepareIndex(index, "type").setSource("d", "1477954800000"));
ensureSearchable(index);
SearchResponse response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d")
.dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin"))).execute().actionGet();
assertSearchResponse(response);
Histogram histo = response.getAggregations().get("histo");
assertThat(histo.getBuckets().size(), equalTo(1));
assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000"));
assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L));
response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d")
.dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin")).format("yyyy-MM-dd"))
.execute().actionGet();
assertSearchResponse(response);
histo = response.getAggregations().get("histo");
assertThat(histo.getBuckets().size(), equalTo(1));
assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2016-11-01"));
assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L));
internalCluster().wipeIndices(index);
}
/**
* When DST ends, local time turns back one hour, so between 2am and 4am wall time we should have four buckets:
* "2015-10-25T02:00:00.000+02:00",

View File

@ -9,13 +9,6 @@ apply plugin: 'elasticsearch.docs-test'
* only remove entries from this list. When it is empty we'll remove it
* entirely and have a party! There will be cake and everything.... */
buildRestTests.expectedUnconvertedCandidates = [
'en/ml/functions/count.asciidoc',
'en/ml/functions/geo.asciidoc',
'en/ml/functions/info.asciidoc',
'en/ml/functions/metric.asciidoc',
'en/ml/functions/rare.asciidoc',
'en/ml/functions/sum.asciidoc',
'en/ml/functions/time.asciidoc',
'en/rest-api/watcher/put-watch.asciidoc',
'en/security/authentication/user-cache.asciidoc',
'en/security/authorization/field-and-document-access-control.asciidoc',
@ -56,7 +49,6 @@ buildRestTests.expectedUnconvertedCandidates = [
'en/watcher/troubleshooting.asciidoc',
'en/rest-api/license/delete-license.asciidoc',
'en/rest-api/license/update-license.asciidoc',
'en/ml/api-quickref.asciidoc',
'en/rest-api/ml/delete-snapshot.asciidoc',
'en/rest-api/ml/forecast.asciidoc',
'en/rest-api/ml/get-bucket.asciidoc',

View File

@ -1,5 +1,6 @@
[role="xpack"]
[[ml-configuring-aggregation]]
=== Aggregating Data For Faster Performance
=== Aggregating data for faster performance
By default, {dfeeds} fetch data from {es} using search and scroll requests.
It can be significantly more efficient, however, to aggregate data in {es}

View File

@ -1,5 +1,6 @@
[role="xpack"]
[[ml-api-quickref]]
== API Quick Reference
== API quick reference
All {ml} endpoints have the following base:
@ -7,6 +8,7 @@ All {ml} endpoints have the following base:
----
/_xpack/ml/
----
// NOTCONSOLE
The main {ml} resources can be accessed with a variety of endpoints:

View File

@ -1,3 +1,4 @@
[role="xpack"]
[[ml-configuring-categories]]
=== Categorizing log messages
@ -77,7 +78,7 @@ NOTE: To add the `categorization_examples_limit` property, you must use the
[float]
[[ml-configuring-analyzer]]
==== Customizing the Categorization Analyzer
==== Customizing the categorization analyzer
Categorization uses English dictionary words to identify log message categories.
By default, it also uses English tokenization rules. For this reason, if you use
@ -213,7 +214,7 @@ API examples above.
[float]
[[ml-viewing-categories]]
==== Viewing Categorization Results
==== Viewing categorization results
After you open the job and start the {dfeed} or supply data to the job, you can
view the categorization results in {kib}. For example:

View File

@ -1,5 +1,6 @@
[role="xpack"]
[[ml-configuring]]
== Configuring Machine Learning
== Configuring machine learning
If you want to use {xpackml} features, there must be at least one {ml} node in
your cluster and all master-eligible nodes must have {ml} enabled. By default,

View File

@ -48,7 +48,7 @@ using the {ml} APIs.
[float]
[[ml-configuring-url-strings]]
==== String Substitution in Custom URLs
==== String substitution in custom URLs
You can use dollar sign ($) delimited tokens in a custom URL. These tokens are
substituted for the values of the corresponding fields in the anomaly records.

View File

@ -1,5 +1,6 @@
[role="xpack"]
[[ml-functions]]
== Function Reference
== Function reference
The {xpackml} features include analysis functions that provide a wide variety of
flexible ways to analyze data for anomalies.

View File

@ -1,5 +1,6 @@
[role="xpack"]
[[ml-count-functions]]
=== Count Functions
=== Count functions
Count functions detect anomalies when the number of events in a bucket is
anomalous.
@ -21,7 +22,7 @@ The {xpackml} features include the following count functions:
[float]
[[ml-count]]
===== Count, High_count, Low_count
===== Count, high_count, low_count
The `count` function detects anomalies when the number of events in a bucket is
anomalous.
@ -44,8 +45,20 @@ see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]
.Example 1: Analyzing events with the count function
[source,js]
--------------------------------------------------
{ "function" : "count" }
PUT _xpack/ml/anomaly_detectors/example1
{
"analysis_config": {
"detectors": [{
"function" : "count"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
This example is probably the simplest possible analysis. It identifies
time buckets during which the overall count of events is higher or lower than
@ -57,12 +70,22 @@ and detects when the event rate is unusual compared to its past behavior.
.Example 2: Analyzing errors with the high_count function
[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example2
{
"function" : "high_count",
"by_field_name" : "error_code",
"over_field_name": "user"
"analysis_config": {
"detectors": [{
"function" : "high_count",
"by_field_name" : "error_code",
"over_field_name": "user"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
If you use this `high_count` function in a detector in your job, it
models the event rate for each error code. It detects users that generate an
@ -72,11 +95,21 @@ unusually high count of error codes compared to other users.
.Example 3: Analyzing status codes with the low_count function
[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example3
{
"function" : "low_count",
"by_field_name" : "status_code"
"analysis_config": {
"detectors": [{
"function" : "low_count",
"by_field_name" : "status_code"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
In this example, the function detects when the count of events for a
status code is lower than usual.
@ -88,22 +121,30 @@ compared to its past behavior.
.Example 4: Analyzing aggregated data with the count function
[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example4
{
"summary_count_field_name" : "events_per_min",
"detectors" [
{ "function" : "count" }
]
}
"analysis_config": {
"summary_count_field_name" : "events_per_min",
"detectors": [{
"function" : "count"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
If you are analyzing an aggregated `events_per_min` field, do not use a sum
function (for example, `sum(events_per_min)`). Instead, use the count function
and the `summary_count_field_name` property.
//TO-DO: For more information, see <<aggreggations.asciidoc>>.
and the `summary_count_field_name` property. For more information, see
<<ml-configuring-aggregation>>.
[float]
[[ml-nonzero-count]]
===== Non_zero_count, High_non_zero_count, Low_non_zero_count
===== Non_zero_count, high_non_zero_count, low_non_zero_count
The `non_zero_count` function detects anomalies when the number of events in a
bucket is anomalous, but it ignores cases where the bucket count is zero. Use
@ -144,11 +185,21 @@ The `non_zero_count` function models only the following data:
.Example 5: Analyzing signatures with the high_non_zero_count function
[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example5
{
"function" : "high_non_zero_count",
"by_field_name" : "signaturename"
"analysis_config": {
"detectors": [{
"function" : "high_non_zero_count",
"by_field_name" : "signaturename"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
If you use this `high_non_zero_count` function in a detector in your job, it
models the count of events for the `signaturename` field. It ignores any buckets
@ -163,7 +214,7 @@ data is sparse, use the `count` functions, which are optimized for that scenario
[float]
[[ml-distinct-count]]
===== Distinct_count, High_distinct_count, Low_distinct_count
===== Distinct_count, high_distinct_count, low_distinct_count
The `distinct_count` function detects anomalies where the number of distinct
values in one field is unusual.
@ -187,11 +238,21 @@ see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]
.Example 6: Analyzing users with the distinct_count function
[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example6
{
"function" : "distinct_count",
"field_name" : "user"
"analysis_config": {
"detectors": [{
"function" : "distinct_count",
"field_name" : "user"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
This `distinct_count` function detects when a system has an unusual number
of logged in users. When you use this function in a detector in your job, it
@ -201,12 +262,22 @@ users is unusual compared to the past.
.Example 7: Analyzing ports with the high_distinct_count function
[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example7
{
"function" : "high_distinct_count",
"field_name" : "dst_port",
"over_field_name": "src_ip"
"analysis_config": {
"detectors": [{
"function" : "high_distinct_count",
"field_name" : "dst_port",
"over_field_name": "src_ip"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
This example detects instances of port scanning. When you use this function in a
detector in your job, it models the distinct count of ports. It also detects the

View File

@ -1,5 +1,6 @@
[role="xpack"]
[[ml-geo-functions]]
=== Geographic Functions
=== Geographic functions
The geographic functions detect anomalies in the geographic location of the
input data.
@ -28,12 +29,22 @@ see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]
.Example 1: Analyzing transactions with the lat_long function
[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example1
{
"function" : "lat_long",
"field_name" : "transactionCoordinates",
"by_field_name" : "creditCardNumber"
"analysis_config": {
"detectors": [{
"function" : "lat_long",
"field_name" : "transactionCoordinates",
"by_field_name" : "creditCardNumber"
}]
},
"data_description": {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
--------------------------------------------------
// CONSOLE
If you use this `lat_long` function in a detector in your job, it
detects anomalies where the geographic location of a credit card transaction is
@ -54,6 +65,7 @@ For example, JSON data might contain the following transaction coordinates:
"creditCardNumber": "1234123412341234"
}
--------------------------------------------------
// NOTCONSOLE
In {es}, location data is likely to be stored in `geo_point` fields. For more
information, see {ref}/geo-point.html[Geo-point datatype]. This data type is not
@ -64,7 +76,15 @@ format. For example, the following Painless script transforms
[source,js]
--------------------------------------------------
PUT _xpack/ml/datafeeds/datafeed-test2
{
"job_id": "farequote",
"indices": ["farequote"],
"query": {
"match_all": {
"boost": 1
}
},
"script_fields": {
"lat-lon": {
"script": {
@ -75,5 +95,7 @@ format. For example, the following Painless script transforms
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:farequote_job]
For more information, see <<ml-configuring-transform>>.

View File

@ -40,6 +40,7 @@ For more information about those properties, see
"over_field_name" : "highest_registered_domain"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `info_content` function in a detector in your job, it models
information that is present in the `subdomain` string. It detects anomalies
@ -60,6 +61,7 @@ choice.
"over_field_name" : "src_ip"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `high_info_content` function in a detector in your job, it
models information content that is held in the DNS query string. It detects
@ -77,6 +79,7 @@ information content is higher than expected.
"by_field_name" : "logfilename"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `low_info_content` function in a detector in your job, it models
information content that is present in the message string for each


@ -1,5 +1,6 @@
[role="xpack"]
[[ml-metric-functions]]
=== Metric Functions
=== Metric functions
The metric functions include functions such as mean, min, and max. These values
are calculated for each bucket. Field values that cannot be converted to
@ -42,6 +43,7 @@ For more information about those properties, see
"by_field_name" : "product"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `min` function in a detector in your job, it detects where the
smallest transaction is lower than previously observed. You can use this
@ -76,6 +78,7 @@ For more information about those properties, see
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `max` function in a detector in your job, it detects where the
longest `responsetime` is longer than previously observed. You can use this
@ -98,6 +101,7 @@ to previous applications.
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
The analysis in the previous example can be performed alongside `high_mean`
functions by application. By combining detectors and using the same influencer
@ -106,7 +110,7 @@ response times for each bucket.
[float]
[[ml-metric-median]]
==== Median, High_median, Low_median
==== Median, high_median, low_median
The `median` function detects anomalies in the statistical median of a value.
The median value is calculated for each bucket.
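For illustration, a complete job configuration that uses the `median` function might look like the following sketch. The job name is arbitrary, and the `responsetime` and `application` fields are assumed from the surrounding examples.

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example-median
{
  "analysis_config": {
    "detectors": [{
      "function" : "median",
      "field_name" : "responsetime",
      "by_field_name" : "application"
    }]
  },
  "data_description": {
    "time_field":"timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------
// NOTCONSOLE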
@ -136,6 +140,7 @@ For more information about those properties, see
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `median` function in a detector in your job, it models the
median `responsetime` for each application over time. It detects when the median
@ -143,7 +148,7 @@ median `responsetime` for each application over time. It detects when the median
[float]
[[ml-metric-mean]]
==== Mean, High_mean, Low_mean
==== Mean, high_mean, low_mean
The `mean` function detects anomalies in the arithmetic mean of a value.
The mean value is calculated for each bucket.
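As a sketch, a job can also pair the `high_mean` and `low_mean` variants as separate detectors to watch for unusually long and unusually short response times independently; the job and field names below are illustrative assumptions. Splitting the two directions into separate detectors keeps their results distinct, whereas a single `mean` detector flags deviations in either direction.

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example-mean
{
  "analysis_config": {
    "detectors": [{
      "function" : "high_mean",
      "field_name" : "responsetime",
      "by_field_name" : "application"
    },
    {
      "function" : "low_mean",
      "field_name" : "responsetime",
      "by_field_name" : "application"
    }]
  },
  "data_description": {
    "time_field":"timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------
// NOTCONSOLE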
@ -173,6 +178,7 @@ For more information about those properties, see
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `mean` function in a detector in your job, it models the mean
`responsetime` for each application over time. It detects when the mean
@ -187,6 +193,7 @@ If you use this `mean` function in a detector in your job, it models the mean
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `high_mean` function in a detector in your job, it models the
mean `responsetime` for each application over time. It detects when the mean
@ -201,6 +208,7 @@ mean `responsetime` for each application over time. It detects when the mean
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `low_mean` function in a detector in your job, it models the
mean `responsetime` for each application over time. It detects when the mean
@ -237,6 +245,7 @@ For more information about those properties, see
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `metric` function in a detector in your job, it models the
mean, min, and max `responsetime` for each application over time. It detects
@ -245,7 +254,7 @@ when the mean, min, or max `responsetime` is unusual compared to previous
[float]
[[ml-metric-varp]]
==== Varp, High_varp, Low_varp
==== Varp, high_varp, low_varp
The `varp` function detects anomalies in the variance of a value, which is a
measure of the variability and spread in the data.
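A minimal sketch of a job definition that uses `varp`, assuming the same illustrative `responsetime` and `application` fields as the fragments above:

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example-varp
{
  "analysis_config": {
    "detectors": [{
      "function" : "varp",
      "field_name" : "responsetime",
      "by_field_name" : "application"
    }]
  },
  "data_description": {
    "time_field":"timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------
// NOTCONSOLE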
@ -273,6 +282,7 @@ For more information about those properties, see
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `varp` function in a detector in your job, it models the
variance in values of `responsetime` for each application over time. It detects
@ -288,6 +298,7 @@ behavior.
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `high_varp` function in a detector in your job, it models the
variance in values of `responsetime` for each application over time. It detects
@ -303,6 +314,7 @@ behavior.
"by_field_name" : "application"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `low_varp` function in a detector in your job, it models the
variance in values of `responsetime` for each application over time. It detects


@ -1,5 +1,6 @@
[role="xpack"]
[[ml-rare-functions]]
=== Rare Functions
=== Rare functions
The rare functions detect values that occur rarely in time or rarely for a
population.
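For illustration, a complete job that applies the `rare` function to HTTP status codes might be defined as in the following sketch; the job name and the `status` field are assumptions taken from the examples below.

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example-rare
{
  "analysis_config": {
    "detectors": [{
      "function" : "rare",
      "by_field_name" : "status"
    }]
  },
  "data_description": {
    "time_field":"timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------
// NOTCONSOLE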
@ -54,6 +55,7 @@ For more information about those properties, see
"by_field_name" : "status"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `rare` function in a detector in your job, it detects values
that are rare in time. It models status codes that occur over time and detects
@ -69,6 +71,7 @@ status codes in a web access log that have never (or rarely) occurred before.
"over_field_name" : "clientip"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `rare` function in a detector in your job, it detects values
that are rare in a population. It models status code and client IP interactions
@ -111,6 +114,7 @@ For more information about those properties, see
"over_field_name" : "clientip"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `freq_rare` function in a detector in your job, it
detects values that are frequently rare in a population. It models URI paths and


@ -1,6 +1,6 @@
[role="xpack"]
[[ml-sum-functions]]
=== Sum Functions
=== Sum functions
The sum functions detect anomalies when the sum of a field in a bucket is anomalous.
@ -16,16 +16,9 @@ The {xpackml} features include the following sum functions:
* xref:ml-sum[`sum`, `high_sum`, `low_sum`]
* xref:ml-nonnull-sum[`non_null_sum`, `high_non_null_sum`, `low_non_null_sum`]
////
TBD: Incorporate from prelert docs?:
Input data may contain pre-calculated fields giving the total count of some value, e.g. transactions per minute.
Ensure you are familiar with our advice on Summarization of Input Data, as this is likely to provide
a more appropriate method than using the sum function.
////
[float]
[[ml-sum]]
==== Sum, High_sum, Low_sum
==== Sum, high_sum, low_sum
The `sum` function detects anomalies where the sum of a field in a bucket is
anomalous.
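A minimal sketch of a job that uses the `sum` function; the `expenses`, `costcenter`, and `employee` field names are illustrative assumptions that match the description below.

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example-sum
{
  "analysis_config": {
    "detectors": [{
      "function" : "sum",
      "field_name" : "expenses",
      "by_field_name" : "costcenter",
      "over_field_name" : "employee"
    }]
  },
  "data_description": {
    "time_field":"timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------
// NOTCONSOLE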
@ -54,6 +47,7 @@ For more information about those properties, see
"over_field_name" : "employee"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `sum` function in a detector in your job, it
models total expenses per employee for each cost center. For each time bucket,
@ -69,6 +63,7 @@ to other employees.
"over_field_name" : "cs_host"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `high_sum` function in a detector in your job, it
models total `cs_bytes`. It detects `cs_hosts` that transfer unusually high
@ -79,7 +74,7 @@ to find users that are abusing internet privileges.
[float]
[[ml-nonnull-sum]]
==== Non_null_sum, High_non_null_sum, Low_non_null_sum
==== Non_null_sum, high_non_null_sum, low_non_null_sum
The `non_null_sum` function is useful if your data is sparse. Buckets without
values are ignored and buckets with a zero value are analyzed.
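For example, a job that applies `high_non_null_sum` to approved amounts per employee might be sketched as follows; the job and field names are illustrative assumptions.

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example-non-null-sum
{
  "analysis_config": {
    "detectors": [{
      "function" : "high_non_null_sum",
      "field_name" : "amount_approved",
      "by_field_name" : "employee"
    }]
  },
  "data_description": {
    "time_field":"timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------
// NOTCONSOLE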
@ -110,6 +105,7 @@ is not applicable for this function.
"byFieldName" : "employee"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `high_non_null_sum` function in a detector in your job, it
models the total `amount_approved` for each employee. It ignores any buckets


@ -1,5 +1,6 @@
[role="xpack"]
[[ml-time-functions]]
=== Time Functions
=== Time functions
The time functions detect events that happen at unusual times, either of the day
or of the week. These functions can be used to find unusual patterns of behavior,
@ -60,6 +61,7 @@ For more information about those properties, see
"by_field_name" : "process"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `time_of_day` function in a detector in your job, it
models when events occur throughout a day for each process. It detects when an
@ -91,6 +93,7 @@ For more information about those properties, see
"over_field_name" : "workstation"
}
--------------------------------------------------
// NOTCONSOLE
If you use this `time_of_week` function in a detector in your job, it
models when events occur throughout the week for each `eventcode`. It detects


@ -1,5 +1,6 @@
[role="xpack"]
[[ml-configuring-pop]]
=== Performing Population Analysis
=== Performing population analysis
Entities or events in your data can be considered anomalous when:


@ -1,5 +1,6 @@
[role="xpack"]
[[stopping-ml]]
== Stopping Machine Learning
== Stopping machine learning
An orderly shutdown of {ml} ensures that:
@ -24,10 +25,10 @@ request stops the `feed1` {dfeed}:
[source,js]
--------------------------------------------------
POST _xpack/ml/datafeeds/feed1/_stop
POST _xpack/ml/datafeeds/datafeed-total-requests/_stop
--------------------------------------------------
// CONSOLE
// TEST[skip:todo]
// TEST[setup:server_metrics_startdf]
NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}.
For more information, see <<security-privileges>>.
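If you need to stop every {dfeed} at once, the stop {dfeeds} API also accepts the `_all` keyword, as in this sketch:

[source,js]
--------------------------------------------------
POST _xpack/ml/datafeeds/_all/_stop
--------------------------------------------------
// NOTCONSOLE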
@ -63,10 +64,10 @@ example, the following request closes the `job1` job:
[source,js]
--------------------------------------------------
POST _xpack/ml/anomaly_detectors/job1/_close
POST _xpack/ml/anomaly_detectors/total-requests/_close
--------------------------------------------------
// CONSOLE
// TEST[skip:todo]
// TEST[setup:server_metrics_openjob]
NOTE: You must have `manage_ml`, or `manage` cluster privileges to close jobs.
For more information, see <<security-privileges>>.
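Similarly, you can close all open jobs in one request by using the `_all` keyword, as in this sketch:

[source,js]
--------------------------------------------------
POST _xpack/ml/anomaly_detectors/_all/_close
--------------------------------------------------
// NOTCONSOLE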


@ -1,5 +1,6 @@
[role="xpack"]
[[ml-configuring-transform]]
=== Transforming Data With Script Fields
=== Transforming data with script fields
If you use {dfeeds}, you can add scripts to transform your data before
it is analyzed. {dfeeds-cap} contain an optional `script_fields` property, where
@ -602,10 +603,3 @@ The preview {dfeed} API returns the following results, which show that
]
----------------------------------
// TESTRESPONSE
////
==== Configuring Script Fields in {dfeeds-cap}
//TO-DO: Add Kibana steps from
//https://github.com/elastic/prelert-legacy/wiki/Transforming-data-with-script_fields#transforming-geo_point-data-to-a-workable-string-format
////


@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.XPackInfoResponse;
@ -31,10 +30,9 @@ public class TransportXPackInfoAction extends HandledTransportAction<XPackInfoRe
@Inject
public TransportXPackInfoAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
LicenseService licenseService, Set<XPackFeatureSet> featureSets) {
super(settings, XPackInfoAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
XPackInfoRequest::new);
ActionFilters actionFilters, LicenseService licenseService, Set<XPackFeatureSet> featureSets) {
super(settings, XPackInfoAction.NAME, threadPool, transportService, actionFilters,
XPackInfoRequest::new);
this.licenseService = licenseService;
this.featureSets = featureSets;
}


@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.ssl.action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
@ -28,10 +27,9 @@ public class TransportGetCertificateInfoAction extends HandledTransportAction<Ge
@Inject
public TransportGetCertificateInfoAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SSLService sslService) {
super(settings, GetCertificateInfoAction.NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, GetCertificateInfoAction.Request::new);
GetCertificateInfoAction.Request::new);
this.sslService = sslService;
}


@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.XPackInfoResponse;
import org.elasticsearch.license.License;
@ -56,7 +55,7 @@ public class TransportXPackInfoActionTests extends ESTestCase {
TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
x -> null, null, Collections.emptySet());
TransportXPackInfoAction action = new TransportXPackInfoAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), licenseService, featureSets);
mock(ActionFilters.class), licenseService, featureSets);
License license = mock(License.class);
long expiryDate = randomLong();


@ -16,7 +16,6 @@ import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@ -58,6 +57,7 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
/**
* Performs a series of elasticsearch queries and aggregations to explore
@ -83,10 +83,10 @@ public class TransportGraphExploreAction extends HandledTransportAction<GraphExp
@Inject
public TransportGraphExploreAction(Settings settings, ThreadPool threadPool, TransportSearchAction transportSearchAction,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
TransportService transportService, ActionFilters actionFilters,
XPackLicenseState licenseState) {
super(settings, GraphExploreAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
GraphExploreRequest::new);
super(settings, GraphExploreAction.NAME, threadPool, transportService, actionFilters,
(Supplier<GraphExploreRequest>)GraphExploreRequest::new);
this.searchAction = transportSearchAction;
this.licenseState = licenseState;
}


@ -15,7 +15,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@ -64,13 +63,12 @@ public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJ
private final PersistentTasksService persistentTasksService;
@Inject
public TransportCloseJobAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService, Client client,
Auditor auditor, PersistentTasksService persistentTasksService) {
public TransportCloseJobAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
ClusterService clusterService, Client client, Auditor auditor,
PersistentTasksService persistentTasksService) {
// We fork in innerTaskOperation(...), so we can use ThreadPool.Names.SAME here:
super(settings, CloseJobAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, CloseJobAction.Request::new, CloseJobAction.Response::new, ThreadPool.Names.SAME);
CloseJobAction.Request::new, CloseJobAction.Response::new, ThreadPool.Names.SAME);
this.client = client;
this.clusterService = clusterService;
this.auditor = auditor;


@ -5,13 +5,14 @@
*/
package org.elasticsearch.xpack.ml.action;
import java.util.function.Supplier;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilder;
@ -37,12 +38,10 @@ public class TransportDeleteCalendarAction extends HandledTransportAction<Delete
private final JobProvider jobProvider;
@Inject
public TransportDeleteCalendarAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
Client client, JobManager jobManager, JobProvider jobProvider) {
public TransportDeleteCalendarAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, Client client, JobManager jobManager, JobProvider jobProvider) {
super(settings, DeleteCalendarAction.NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, DeleteCalendarAction.Request::new);
(Supplier<DeleteCalendarAction.Request>) DeleteCalendarAction.Request::new);
this.client = client;
this.jobManager = jobManager;
this.jobProvider = jobProvider;


@ -16,7 +16,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
@ -44,10 +43,9 @@ public class TransportDeleteCalendarEventAction extends HandledTransportAction<D
@Inject
public TransportDeleteCalendarEventAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
Client client, JobProvider jobProvider, JobManager jobManager) {
super(settings, DeleteCalendarEventAction.NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, DeleteCalendarEventAction.Request::new);
DeleteCalendarEventAction.Request::new);
this.client = client;
this.jobProvider = jobProvider;
this.jobManager = jobManager;


@ -9,7 +9,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -38,10 +37,8 @@ public class TransportDeleteExpiredDataAction extends HandledTransportAction<Del
@Inject
public TransportDeleteExpiredDataAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Client client, ClusterService clusterService) {
super(settings, DeleteExpiredDataAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
DeleteExpiredDataAction.Request::new);
ActionFilters actionFilters, Client client, ClusterService clusterService) {
super(settings, DeleteExpiredDataAction.NAME, threadPool, transportService, actionFilters, DeleteExpiredDataAction.Request::new);
this.client = ClientHelper.clientWithOrigin(client, ClientHelper.ML_ORIGIN);
this.clusterService = clusterService;
}


@ -16,7 +16,6 @@ import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -34,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
@ -44,12 +44,10 @@ public class TransportDeleteFilterAction extends HandledTransportAction<DeleteFi
private final ClusterService clusterService;
@Inject
public TransportDeleteFilterAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService, Client client) {
public TransportDeleteFilterAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, ClusterService clusterService, Client client) {
super(settings, DeleteFilterAction.NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, DeleteFilterAction.Request::new);
(Supplier<DeleteFilterAction.Request>) DeleteFilterAction.Request::new);
this.clusterService = clusterService;
this.client = client;
}


@ -11,7 +11,6 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -39,10 +38,10 @@ public class TransportDeleteModelSnapshotAction extends HandledTransportAction<D
@Inject
public TransportDeleteModelSnapshotAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ActionFilters actionFilters,
JobProvider jobProvider, ClusterService clusterService, Client client, Auditor auditor) {
super(settings, DeleteModelSnapshotAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
DeleteModelSnapshotAction.Request::new);
super(settings, DeleteModelSnapshotAction.NAME, threadPool, transportService, actionFilters,
DeleteModelSnapshotAction.Request::new);
this.client = client;
this.jobProvider = jobProvider;
this.clusterService = clusterService;


@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ml.action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -26,10 +25,9 @@ public class TransportFlushJobAction extends TransportJobTaskAction<FlushJobActi
@Inject
public TransportFlushJobAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
AutodetectProcessManager processManager) {
super(settings, FlushJobAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
FlushJobAction.Request::new, FlushJobAction.Response::new, ThreadPool.Names.SAME, processManager);
super(settings, FlushJobAction.NAME, threadPool, clusterService, transportService, actionFilters,
FlushJobAction.Request::new, FlushJobAction.Response::new, ThreadPool.Names.SAME, processManager);
// ThreadPool.Names.SAME, because operations are executed by the autodetect worker thread
}


@ -10,7 +10,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -45,10 +44,9 @@ public class TransportForecastJobAction extends TransportJobTaskAction<ForecastJ
@Inject
public TransportForecastJobAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, JobProvider jobProvider,
AutodetectProcessManager processManager) {
JobProvider jobProvider, AutodetectProcessManager processManager) {
super(settings, ForecastJobAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, ForecastJobAction.Request::new, ForecastJobAction.Response::new,
ForecastJobAction.Request::new, ForecastJobAction.Response::new,
ThreadPool.Names.SAME, processManager);
this.jobProvider = jobProvider;
// ThreadPool.Names.SAME, because operations are executed by the autodetect worker thread


@ -5,11 +5,12 @@
*/
package org.elasticsearch.xpack.ml.action;
import java.util.function.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
@ -27,10 +28,9 @@ public class TransportGetBucketsAction extends HandledTransportAction<GetBuckets
@Inject
public TransportGetBucketsAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobProvider jobProvider, JobManager jobManager, Client client) {
super(settings, GetBucketsAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
GetBucketsAction.Request::new);
ActionFilters actionFilters, JobProvider jobProvider, JobManager jobManager, Client client) {
super(settings, GetBucketsAction.NAME, threadPool, transportService, actionFilters,
(Supplier<GetBucketsAction.Request>) GetBucketsAction.Request::new);
this.jobProvider = jobProvider;
this.jobManager = jobManager;
this.client = client;
