Merge branch 'master' into doc/plugins-offline-url

# Conflicts:
#	docs/plugins/mapper-attachments.asciidoc
David Pilato 2016-09-19 14:51:55 +02:00
commit 9e58ca835c
618 changed files with 10318 additions and 18544 deletions

View File

@ -120,7 +120,8 @@ Please follow these formatting guidelines:
* The rest is left to Java coding standards
* Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do.
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them.
* Eclipse: Preferences->Java->Code Style->Organize Imports. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
* Eclipse: `Preferences->Java->Code Style->Organize Imports`. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
* IntelliJ: `Preferences->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
* Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.
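To make the wildcard-import rule above concrete, here is a hypothetical file (the class and names are invented purely for illustration): the commented-out wildcard form is what the build's checkstyle rules reject, while the explicit imports below it compile cleanly.

// Hypothetical example of the import rule; the class itself is invented.
// import java.util.*;            // forbidden: a wildcard import fails the build

import java.util.ArrayList;       // required: one explicit import per class
import java.util.List;

public class ImportsExample {
    public static void main(String[] args) {
        List<String> names = new ArrayList<>();
        names.add("explicit-imports-only");
        System.out.println(names);
    }
}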
To create a distribution from the source, simply run:

View File

@ -364,10 +364,12 @@ These are the linux flavors the Vagrantfile currently supports:
* ubuntu-1204 aka precise
* ubuntu-1404 aka trusty
* ubuntu-1504 aka vivid
* ubuntu-1604 aka xenial
* debian-8 aka jessie, the current debian stable distribution
* centos-6
* centos-7
* fedora-22
* fedora-24
* oel-6 aka Oracle Enterprise Linux 6
* oel-7 aka Oracle Enterprise Linux 7
* sles-12
* opensuse-13
@ -376,7 +378,6 @@ We're missing the following from the support matrix because there aren't high
quality boxes available in vagrant atlas:
* sles-11
* oel-6
We're missing the following because our tests are very linux/bash centric:

Vagrantfile
View File

@ -37,6 +37,13 @@ Vagrant.configure(2) do |config|
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
SHELL
end
config.vm.define "ubuntu-1604" do |config|
config.vm.box = "elastic/ubuntu-16.04-x86_64"
ubuntu_common config, extra: <<-SHELL
# Install Jayatana so we can work around it being present.
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
SHELL
end
# Wheezy's backports don't contain Openjdk 8 and the backflips required to
# get the sun jdk on there just aren't worth it. We have jessie for testing
# debian and it works fine.

View File

@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.settings.Settings;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
@ -160,11 +159,9 @@ public class AllocationBenchmark {
public ClusterState measureAllocation() {
ClusterState clusterState = initialClusterState;
while (clusterState.getRoutingNodes().hasUnassignedShards()) {
RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes()
clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes()
.shardsWithState(ShardRoutingState.INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
result = strategy.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
clusterState = strategy.reroute(clusterState, "reroute");
}
return clusterState;
}
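Restating the new allocation loop in one place (a sketch only, assuming the 5.x-era signatures this diff introduces, where applyStartedShards and reroute return the resulting ClusterState directly instead of a RoutingAllocation.Result):

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

// Illustrative helper: runs allocation rounds until every shard is assigned.
// No intermediate Result object or ClusterState.builder(...) re-wrapping needed.
final class AllocationLoopSketch {
    static ClusterState allocateAll(AllocationService strategy, ClusterState state) {
        while (state.getRoutingNodes().hasUnassignedShards()) {
            state = strategy.applyStartedShards(state,
                    state.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING));
            state = strategy.reroute(state, "reroute");
        }
        return state;
    }
}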

View File

@ -1,8 +0,0 @@
# Do not log at all if it is not really critical - we're in a benchmark
benchmarks.es.logger.level=ERROR
log4j.rootLogger=${benchmarks.es.logger.level}, out
log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n

View File

@ -0,0 +1,8 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
# Do not log at all if it is not really critical - we're in a benchmark
rootLogger.level = error
rootLogger.appenderRef.console.ref = console
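For orientation, a minimal sketch of code on the consuming side of this configuration, assuming the post-migration Loggers helper hands back a Log4j 2 Logger (as the DeprecationLogger imports later in this commit suggest); the class is invented.

import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

// Invented example: with rootLogger.level = error above, debug output is
// dropped and only error-level messages reach the console appender.
final class BenchmarkLoggingSketch {
    private static final Logger LOGGER = Loggers.getLogger(BenchmarkLoggingSketch.class);

    static void demo() {
        LOGGER.debug("suppressed by the benchmark config");
        LOGGER.error("still printed as [timestamp][ERROR][...]");
    }
}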

View File

@ -157,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
private static String findJavaHome() {
String javaHome = System.getenv('JAVA_HOME')
if (javaHome == null) {
if (System.getProperty("idea.active") != null) {
if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) {
// intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with
javaHome = Jvm.current().javaHome
} else {
@ -405,9 +405,9 @@ class BuildPlugin implements Plugin<Project> {
//options.incremental = true
if (project.javaVersion == JavaVersion.VERSION_1_9) {
// hack until gradle supports java 9's new "-release" arg
// hack until gradle supports java 9's new "--release" arg
assert minimumJava == JavaVersion.VERSION_1_8
options.compilerArgs << '-release' << '8'
options.compilerArgs << '--release' << '8'
project.sourceCompatibility = null
project.targetCompatibility = null
}

View File

@ -148,6 +148,9 @@ class PrecommitTasks {
checkstyleTask.dependsOn(task)
task.dependsOn(copyCheckstyleConf)
task.inputs.file(checkstyleSuppressions)
task.reports {
html.enabled false
}
}
}
return checkstyleTask

View File

@ -1016,14 +1016,6 @@
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptMultiThreadedTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonSecurityTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]AttachmentMapper.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]DateAttachmentMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]EncryptedDocMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]MetadataMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]MultifieldAttachmentMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]SimpleAttachmentMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]StandaloneRunner.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]VariousDocTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapper.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperUpgradeTests.java" checks="LineLength" />

View File

@ -20,4 +20,4 @@ commonscodec = 1.10
hamcrest = 1.3
securemock = 1.2
# benchmark dependencies
jmh = 1.12
jmh = 1.14
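As context for the jmh bump from 1.12 to 1.14, a minimal, invented JMH benchmark of the shape the benchmarks module compiles; only the annotations are real JMH API.

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;

// Invented benchmark: JMH generates the harness around @Benchmark methods,
// and the returned value is consumed to defeat dead-code elimination.
public class SampleBenchmark {
    @Benchmark
    @BenchmarkMode(Mode.AverageTime)
    public long measureSum() {
        long sum = 0;
        for (int i = 0; i < 1_000; i++) {
            sum += i;
        }
        return sum;
    }
}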

View File

@ -72,7 +72,7 @@ public class RestNoopBulkAction extends BaseRestHandler {
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, true);
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, null, defaultPipeline, null, true);
// short circuit the call to the transport layer
BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);

View File

@ -216,7 +216,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
public void onFailure(Exception e) {
if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
// We haven't yet created the index for the task results so it can't be found.
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or stored its results", e,
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e,
request.getTaskId()));
} else {
listener.onFailure(e);

View File

@ -31,7 +31,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
@ -111,15 +110,14 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
@Override
public ClusterState execute(ClusterState currentState) {
RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),
request.isRetryFailed());
ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
clusterStateToSend = newState;
explanations = routingResult.explanations();
AllocationService.CommandsResult commandsResult =
allocationService.reroute(currentState, request.getCommands(), request.explain(), request.isRetryFailed());
clusterStateToSend = commandsResult.getClusterState();
explanations = commandsResult.explanations();
if (request.dryRun()) {
return currentState;
}
return newState;
return commandsResult.getClusterState();
}
}
}
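A condensed sketch of the reshaped commands API used above, assuming the CommandsResult accessors shown in this diff; parameter values are illustrative.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;

// Illustrative: reroute-with-commands now returns the resulting state and the
// explanations together, replacing the RoutingAllocation.Result re-wrap dance.
final class RerouteSketch {
    static ClusterState apply(AllocationService allocationService, ClusterState current,
                              AllocationCommands commands, boolean explain, boolean retryFailed) {
        AllocationService.CommandsResult result =
                allocationService.reroute(current, commands, explain, retryFailed);
        RoutingExplanations explanations = result.explanations(); // surfaced to the client
        return result.getClusterState();
    }
}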

View File

@ -33,7 +33,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
@ -157,11 +156,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public ClusterState execute(final ClusterState currentState) {
// now, reroute in case things that require it changed (e.g. number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "reroute after cluster update settings");
if (!routingResult.changed()) {
return currentState;
}
return ClusterState.builder(currentState).routingResult(routingResult).build();
return allocationService.reroute(currentState, "reroute after cluster update settings");
}
});
}

View File

@ -38,17 +38,11 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.tasks.Task;
@ -67,25 +61,15 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
*/
public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {
private final IndicesService indicesService;
private final ScriptService scriptService;
private final BigArrays bigArrays;
private final FetchPhase fetchPhase;
private final SearchService searchService;
@Inject
public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService, ScriptService scriptService,
BigArrays bigArrays, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
TransportService transportService, SearchService searchService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
this.indicesService = indicesService;
this.scriptService = scriptService;
this.bigArrays = bigArrays;
this.fetchPhase = fetchPhase;
this.searchService = searchService;
}
@Override
@ -161,29 +145,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
@Override
protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().id());
boolean valid;
String explanation = null;
String error = null;
Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");
DefaultSearchContext searchContext = new DefaultSearchContext(0,
new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
indexService, indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(),
parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(),
request.nowInMillis(), request.filteringAliases());
SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
SearchContext.setCurrent(searchContext);
try {
searchContext.parsedQuery(searchContext.getQueryShardContext().toQuery(request.query()));
searchContext.preProcess();
ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query());
searchContext.parsedQuery(parsedQuery);
searchContext.preProcess(request.rewrite());
valid = true;
if (request.rewrite()) {
explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
} else if (request.explain()) {
explanation = searchContext.filteredQuery().query().toString();
}
explanation = explain(searchContext, request.rewrite());
} catch (QueryShardException|ParsingException e) {
valid = false;
error = e.getDetailedMessage();
@ -191,19 +166,18 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
valid = false;
error = e.getMessage();
} finally {
searchContext.close();
SearchContext.removeCurrent();
Releasables.close(searchContext, () -> SearchContext.removeCurrent());
}
return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
}
private String getRewrittenQuery(IndexSearcher searcher, Query query) throws IOException {
Query queryRewrite = searcher.rewrite(query);
if (queryRewrite instanceof MatchNoDocsQuery) {
return query.toString();
private String explain(SearchContext context, boolean rewritten) throws IOException {
Query query = context.query();
if (rewritten && query instanceof MatchNoDocsQuery) {
return context.parsedQuery().query().toString();
} else {
return queryRewrite.toString();
return query.toString();
}
}
}

View File

@ -293,7 +293,7 @@ public class BulkProcessor implements Closeable {
}
public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true);
bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true);
executeIfNeeded();
return this;
}

View File

@ -35,12 +35,15 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.ArrayList;
@ -57,6 +60,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* @see org.elasticsearch.client.Client#bulk(BulkRequest)
*/
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(BulkRequest.class));
private static final int REQUEST_OVERHEAD = 50;
@ -257,17 +262,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
return add(data, defaultIndex, defaultType, null, null, null, null, true);
return add(data, defaultIndex, defaultType, null, null, null, null, null, true);
}
/**
* Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception {
return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex);
return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex);
}
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
XContent xContent = XContentFactory.xContent(data);
int line = 0;
int from = 0;
@ -301,6 +306,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
String id = null;
String routing = defaultRouting;
String parent = null;
FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
String[] fields = defaultFields;
String timestamp = null;
TimeValue ttl = null;
@ -353,16 +359,21 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
pipeline = parser.text();
} else if ("fields".equals(currentFieldName)) {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
} else if ("_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
} else {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
List<Object> values = parser.list();
fields = values.toArray(new String[values.size()]);
} else {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
}
} else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
} else if (token != XContentParser.Token.VALUE_NULL) {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
}
@ -402,7 +413,10 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
.version(version).versionType(versionType)
.routing(routing)
.parent(parent)
.source(data.slice(from, nextMarker - from));
.fromXContent(data.slice(from, nextMarker - from));
if (fetchSourceContext != null) {
updateRequest.fetchSource(fetchSourceContext);
}
if (fields != null) {
updateRequest.fields(fields);
}
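A sketch of a request body the parser above now accepts, embedded as a Java string (index, type, and id are invented): _source in the action metadata populates the update's FetchSourceContext, while the old fields key still parses but logs a deprecation warning.

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.common.bytes.BytesArray;

// Invented payload: an update action asking for the updated _source back.
final class BulkSourceSketch {
    static BulkRequest parse() throws Exception {
        String body =
                "{\"update\":{\"_index\":\"test\",\"_type\":\"doc\",\"_id\":\"1\",\"_source\":true}}\n" +
                "{\"doc\":{\"field\":\"value\"}}\n";
        return new BulkRequest().add(new BytesArray(body), null, null);
    }
}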

View File

@ -251,7 +251,8 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
// add the response
IndexResponse indexResponse = result.getResponse();
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult());
if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}

View File

@ -40,7 +40,7 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
private String routing;
private String preference;
private QueryBuilder query;
private String[] fields;
private String[] storedFields;
private FetchSourceContext fetchSourceContext;
private String[] filteringAlias = Strings.EMPTY_ARRAY;
@ -122,12 +122,12 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
}
public String[] fields() {
return fields;
public String[] storedFields() {
return storedFields;
}
public ExplainRequest fields(String[] fields) {
this.fields = fields;
public ExplainRequest storedFields(String[] fields) {
this.storedFields = fields;
return this;
}
@ -167,8 +167,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
preference = in.readOptionalString();
query = in.readNamedWriteable(QueryBuilder.class);
filteringAlias = in.readStringArray();
fields = in.readOptionalStringArray();
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
storedFields = in.readOptionalStringArray();
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
nowInMillis = in.readVLong();
}
@ -181,8 +181,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
out.writeOptionalString(preference);
out.writeNamedWriteable(query);
out.writeStringArray(filteringAlias);
out.writeOptionalStringArray(fields);
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalStringArray(storedFields);
out.writeOptionalWriteable(fetchSourceContext);
out.writeVLong(nowInMillis);
}
}

View File

@ -88,10 +88,10 @@ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder<Ex
}
/**
* Explicitly specify the fields that will be returned for the explained document. By default, nothing is returned.
* Explicitly specify the stored fields that will be returned for the explained document. By default, nothing is returned.
*/
public ExplainRequestBuilder setFields(String... fields) {
request.fields(fields);
public ExplainRequestBuilder setStoredFields(String... fields) {
request.storedFields(fields);
return this;
}
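Hypothetical caller code for the renamed builder method (index, type, and field names invented), assuming the usual transport client prepareExplain entry point.

import org.elasticsearch.action.explain.ExplainResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

// Invented usage: setStoredFields(...) replaces the old setFields(...).
final class ExplainUsageSketch {
    static ExplainResponse explain(Client client) {
        return client.prepareExplain("index", "doc", "1")
                .setQuery(QueryBuilders.termQuery("user", "kimchy"))
                .setStoredFields("user")
                .get();
    }
}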

View File

@ -31,20 +31,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.search.rescore.RescoreSearchContext;
@ -60,26 +54,15 @@ import java.io.IOException;
// TODO: AggregatedDfs. Currently the idf can be different than when executing a normal search with explain.
public class TransportExplainAction extends TransportSingleShardAction<ExplainRequest, ExplainResponse> {
private final IndicesService indicesService;
private final ScriptService scriptService;
private final BigArrays bigArrays;
private final FetchPhase fetchPhase;
private final SearchService searchService;
@Inject
public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService, ScriptService scriptService,
BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
FetchPhase fetchPhase) {
TransportService transportService, SearchService searchService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
ExplainRequest::new, ThreadPool.Names.GET);
this.indicesService = indicesService;
this.scriptService = scriptService;
this.bigArrays = bigArrays;
this.fetchPhase = fetchPhase;
this.searchService = searchService;
}
@Override
@ -104,34 +87,30 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
@Override
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
if (!result.exists()) {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
}
SearchContext context = new DefaultSearchContext(0,
new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
result.searcher(), indexService, indexShard, scriptService, bigArrays,
threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
SearchContext.setCurrent(context);
Engine.GetResult result = null;
try {
result = context.indexShard().get(new Engine.Get(false, uidTerm));
if (!result.exists()) {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
}
context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
context.preProcess();
context.preProcess(true);
int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
for (RescoreSearchContext ctx : context.rescore()) {
Rescorer rescorer = ctx.rescorer();
explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
}
if (request.fields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
if (request.storedFields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
// Advantage is that we're not opening a second searcher to retrieve the _source. Also
// because we are working in the same searcher in engineGetResult we can be sure that a
// doc isn't deleted between the initial get and this call.
GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext());
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
} else {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
@ -139,8 +118,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
} catch (IOException e) {
throw new ElasticsearchException("Could not explain", e);
} finally {
context.close();
SearchContext.removeCurrent();
Releasables.close(result, context, () -> SearchContext.removeCurrent());
}
}

View File

@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.net.InetAddress;
import java.util.Objects;
public abstract class FieldStats<T> implements Writeable, ToXContent {
private final byte type;
@ -46,13 +47,11 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
protected T minValue;
protected T maxValue;
FieldStats(byte type, long maxDoc, boolean isSearchable, boolean isAggregatable) {
this(type, maxDoc, 0, 0, 0, isSearchable, isAggregatable, null, null);
}
FieldStats(byte type,
long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
Objects.requireNonNull(minValue, "minValue must not be null");
Objects.requireNonNull(maxValue, "maxValue must not be null");
this.type = type;
this.maxDoc = maxDoc;
this.docCount = docCount;
@ -220,14 +219,10 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
private void updateMinMax(T min, T max) {
if (minValue == null) {
minValue = min;
} else if (min != null && compare(minValue, min) > 0) {
if (compare(minValue, min) > 0) {
minValue = min;
}
if (maxValue == null) {
maxValue = max;
} else if (max != null && compare(maxValue, max) < 0) {
if (compare(maxValue, max) < 0) {
maxValue = max;
}
}
@ -266,11 +261,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
out.writeLong(sumTotalTermFreq);
out.writeBoolean(isSearchable);
out.writeBoolean(isAggregatable);
boolean hasMinMax = minValue != null;
out.writeBoolean(hasMinMax);
if (hasMinMax) {
writeMinMax(out);
}
writeMinMax(out);
}
protected abstract void writeMinMax(StreamOutput out) throws IOException;
@ -280,9 +271,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
* otherwise <code>false</code> is returned
*/
public boolean match(IndexConstraint constraint) {
if (minValue == null) {
return false;
}
int cmp;
T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
if (constraint.getProperty() == IndexConstraint.Property.MIN) {
@ -307,6 +295,31 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FieldStats<?> that = (FieldStats<?>) o;
if (type != that.type) return false;
if (maxDoc != that.maxDoc) return false;
if (docCount != that.docCount) return false;
if (sumDocFreq != that.sumDocFreq) return false;
if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
if (isSearchable != that.isSearchable) return false;
if (isAggregatable != that.isAggregatable) return false;
if (!minValue.equals(that.minValue)) return false;
return maxValue.equals(that.maxValue);
}
@Override
public int hashCode() {
return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
minValue, maxValue);
}
public static class Long extends FieldStats<java.lang.Long> {
public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
@ -315,17 +328,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
isSearchable, isAggregatable, minValue, maxValue);
}
public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, null, null);
}
public Long(long maxDoc,
boolean isSearchable, boolean isAggregatable) {
super((byte) 0, maxDoc, isSearchable, isAggregatable);
}
@Override
public int compare(java.lang.Long o1, java.lang.Long o2) {
return o1.compareTo(o2);
@ -344,12 +346,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
return minValue != null ? java.lang.Long.toString(minValue) : null;
return java.lang.Long.toString(minValue);
}
@Override
public String getMaxValueAsString() {
return maxValue != null ? java.lang.Long.toString(maxValue) : null;
return java.lang.Long.toString(maxValue);
}
}
@ -361,15 +363,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
minValue, maxValue);
}
public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, null, null);
}
public Double(long maxDoc, boolean isSearchable, boolean isAggregatable) {
super((byte) 1, maxDoc, isSearchable, isAggregatable);
}
@Override
public int compare(java.lang.Double o1, java.lang.Double o2) {
return o1.compareTo(o2);
@ -391,12 +384,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
return minValue != null ? java.lang.Double.toString(minValue) : null;
return java.lang.Double.toString(minValue);
}
@Override
public String getMaxValueAsString() {
return maxValue != null ? java.lang.Double.toString(maxValue) : null;
return java.lang.Double.toString(maxValue);
}
}
@ -412,20 +405,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
this.formatter = formatter;
}
public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
FormatDateTimeFormatter formatter) {
super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
null, null);
this.formatter = formatter;
}
public Date(long maxDoc, boolean isSearchable, boolean isAggregatable,
FormatDateTimeFormatter formatter) {
super((byte) 2, maxDoc, isSearchable, isAggregatable);
this.formatter = formatter;
}
@Override
public int compare(java.lang.Long o1, java.lang.Long o2) {
return o1.compareTo(o2);
@ -449,12 +428,29 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
return minValue != null ? formatter.printer().print(minValue) : null;
return formatter.printer().print(minValue);
}
@Override
public String getMaxValueAsString() {
return maxValue != null ? formatter.printer().print(maxValue) : null;
return formatter.printer().print(maxValue);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
Date that = (Date) o;
return Objects.equals(formatter.format(), that.formatter.format());
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + formatter.format().hashCode();
return result;
}
}
@ -467,10 +463,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
minValue, maxValue);
}
public Text(long maxDoc, boolean isSearchable, boolean isAggregatable) {
super((byte) 3, maxDoc, isSearchable, isAggregatable);
}
@Override
public int compare(BytesRef o1, BytesRef o2) {
return o1.compareTo(o2);
@ -492,12 +484,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
return minValue != null ? minValue.utf8ToString() : null;
return minValue.utf8ToString();
}
@Override
public String getMaxValueAsString() {
return maxValue != null ? maxValue.utf8ToString() : null;
return maxValue.utf8ToString();
}
@Override
@ -516,10 +508,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
minValue, maxValue);
}
public Ip(long maxDoc, boolean isSearchable, boolean isAggregatable) {
super((byte) 4, maxDoc, isSearchable, isAggregatable);
}
@Override
public int compare(InetAddress o1, InetAddress o2) {
byte[] b1 = InetAddressPoint.encode(o1);
@ -544,12 +532,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
return minValue != null ? NetworkAddress.format(minValue) : null;
return NetworkAddress.format(minValue);
}
@Override
public String getMaxValueAsString() {
return maxValue != null ? NetworkAddress.format(maxValue) : null;
return NetworkAddress.format(maxValue);
}
}
@ -561,53 +549,35 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
long sumTotalTermFreq = in.readLong();
boolean isSearchable = in.readBoolean();
boolean isAggregatable = in.readBoolean();
boolean hasMinMax = in.readBoolean();
switch (type) {
case 0:
if (hasMinMax) {
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readLong(), in.readLong());
}
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
isSearchable, isAggregatable, in.readLong(), in.readLong());
case 1:
if (hasMinMax) {
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readDouble(), in.readDouble());
}
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
isSearchable, isAggregatable, in.readDouble(), in.readDouble());
case 2:
FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
if (hasMinMax) {
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
}
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, formatter);
isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
case 3:
if (hasMinMax) {
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
}
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, null, null);
isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
case 4:
InetAddress min = null;
InetAddress max = null;
if (hasMinMax) {
int l1 = in.readByte();
byte[] b1 = new byte[l1];
int l2 = in.readByte();
byte[] b2 = new byte[l2];
min = InetAddressPoint.decode(b1);
max = InetAddressPoint.decode(b2);
}
int l1 = in.readByte();
byte[] b1 = new byte[l1];
in.readBytes(b1, 0, l1);
int l2 = in.readByte();
byte[] b2 = new byte[l2];
in.readBytes(b2, 0, l2);
InetAddress min = InetAddressPoint.decode(b1);
InetAddress max = InetAddressPoint.decode(b2);
return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, min, max);

View File

@ -51,7 +51,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
private String parent;
private String preference;
private String[] fields;
private String[] storedFields;
private FetchSourceContext fetchSourceContext;
@ -61,7 +61,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
private VersionType versionType = VersionType.INTERNAL;
private long version = Versions.MATCH_ANY;
private boolean ignoreErrorsOnGeneratedFields;
public GetRequest() {
type = "_all";
@ -187,20 +186,20 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
}
/**
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
public GetRequest fields(String... fields) {
this.fields = fields;
public GetRequest storedFields(String... fields) {
this.storedFields = fields;
return this;
}
/**
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
public String[] fields() {
return this.fields;
public String[] storedFields() {
return this.storedFields;
}
/**
@ -248,19 +247,10 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
return this;
}
public GetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
return this;
}
public VersionType versionType() {
return this.versionType;
}
public boolean ignoreErrorsOnGeneratedFields() {
return ignoreErrorsOnGeneratedFields;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -270,19 +260,12 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
parent = in.readOptionalString();
preference = in.readOptionalString();
refresh = in.readBoolean();
int size = in.readInt();
if (size >= 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
storedFields = in.readOptionalStringArray();
realtime = in.readBoolean();
this.ignoreErrorsOnGeneratedFields = in.readBoolean();
this.versionType = VersionType.fromValue(in.readByte());
this.version = in.readLong();
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
}
@Override
@ -295,19 +278,11 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
out.writeOptionalString(preference);
out.writeBoolean(refresh);
if (fields == null) {
out.writeInt(-1);
} else {
out.writeInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
out.writeOptionalStringArray(storedFields);
out.writeBoolean(realtime);
out.writeBoolean(ignoreErrorsOnGeneratedFields);
out.writeByte(versionType.getValue());
out.writeLong(version);
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalWriteable(fetchSourceContext);
}
@Override

View File

@ -88,8 +88,8 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
public GetRequestBuilder setFields(String... fields) {
request.fields(fields);
public GetRequestBuilder setStoredFields(String... fields) {
request.storedFields(fields);
return this;
}
@ -155,11 +155,6 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
return this;
}
public GetRequestBuilder setIgnoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
return this;
}
/**
* Sets the version, which will cause the get operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
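Hypothetical caller code for the renamed get builder method (names invented), assuming the standard prepareGet entry point.

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;

// Invented usage: setStoredFields(...) replaces the removed setFields(...).
final class GetUsageSketch {
    static GetResponse get(Client client) {
        return client.prepareGet("index", "doc", "1")
                .setStoredFields("user", "date")
                .get();
    }
}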

View File

@ -134,14 +134,26 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
return getResult.getSource();
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
public Map<String, GetField> getFields() {
return getResult.getFields();
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
public GetField getField(String name) {
return getResult.field(name);
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
@Override
public Iterator<GetField> iterator() {
return getResult.iterator();
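A sketch of the migration the deprecation notes above point at (field name invented): read values from getSource() rather than the deprecated per-field accessors.

import java.util.Map;
import org.elasticsearch.action.get.GetResponse;

// Invented example: prefer getSource() over getFields()/getField()/iterator().
final class GetResponseSketch {
    static Object readUser(GetResponse response) {
        Map<String, Object> source = response.getSource();
        return source == null ? null : source.get("user");
    }
}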

View File

@ -28,6 +28,7 @@ import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@ -58,7 +59,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
private String id;
private String routing;
private String parent;
private String[] fields;
private String[] storedFields;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
private FetchSourceContext fetchSourceContext;
@ -136,13 +137,13 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
return parent;
}
public Item fields(String... fields) {
this.fields = fields;
public Item storedFields(String... fields) {
this.storedFields = fields;
return this;
}
public String[] fields() {
return this.fields;
public String[] storedFields() {
return this.storedFields;
}
public long version() {
@ -188,17 +189,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
id = in.readString();
routing = in.readOptionalString();
parent = in.readOptionalString();
int size = in.readVInt();
if (size > 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
storedFields = in.readOptionalStringArray();
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
}
@Override
@ -208,19 +203,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
out.writeString(id);
out.writeOptionalString(routing);
out.writeOptionalString(parent);
if (fields == null) {
out.writeVInt(0);
} else {
out.writeVInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
out.writeOptionalStringArray(storedFields);
out.writeLong(version);
out.writeByte(versionType.getValue());
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalWriteable(fetchSourceContext);
}
@Override
@ -233,7 +220,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
if (version != item.version) return false;
if (fetchSourceContext != null ? !fetchSourceContext.equals(item.fetchSourceContext) : item.fetchSourceContext != null)
return false;
if (!Arrays.equals(fields, item.fields)) return false;
if (!Arrays.equals(storedFields, item.storedFields)) return false;
if (!id.equals(item.id)) return false;
if (!index.equals(item.index)) return false;
if (routing != null ? !routing.equals(item.routing) : item.routing != null) return false;
@ -251,7 +238,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
result = 31 * result + id.hashCode();
result = 31 * result + (routing != null ? routing.hashCode() : 0);
result = 31 * result + (parent != null ? parent.hashCode() : 0);
result = 31 * result + (fields != null ? Arrays.hashCode(fields) : 0);
result = 31 * result + (storedFields != null ? Arrays.hashCode(storedFields) : 0);
result = 31 * result + Long.hashCode(version);
result = 31 * result + versionType.hashCode();
result = 31 * result + (fetchSourceContext != null ? fetchSourceContext.hashCode() : 0);
@ -262,8 +249,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
String preference;
boolean realtime = true;
boolean refresh;
public boolean ignoreErrorsOnGeneratedFields = false;
List<Item> items = new ArrayList<>();
public List<Item> getItems() {
@ -338,11 +323,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
public MultiGetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
return this;
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
}
@ -386,7 +366,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
String id = null;
String routing = defaultRouting;
String parent = null;
List<String> fields = null;
List<String> storedFields = null;
long version = Versions.MATCH_ANY;
VersionType versionType = VersionType.INTERNAL;
@ -410,8 +390,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
} else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
parent = parser.text();
} else if ("fields".equals(currentFieldName)) {
fields = new ArrayList<>();
fields.add(parser.text());
throw new ParsingException(parser.getTokenLocation(),
"Unsupported field [fields] used, expected [stored_fields] instead");
} else if ("stored_fields".equals(currentFieldName)) {
storedFields = new ArrayList<>();
storedFields.add(parser.text());
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
@ -427,9 +410,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
fields = new ArrayList<>();
throw new ParsingException(parser.getTokenLocation(),
"Unsupported field [fields] used, expected [stored_fields] instead");
} else if ("stored_fields".equals(currentFieldName)) {
storedFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
fields.add(parser.text());
storedFields.add(parser.text());
}
} else if ("_source".equals(currentFieldName)) {
ArrayList<String> includes = new ArrayList<>();
@ -471,12 +457,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
}
String[] aFields;
if (fields != null) {
aFields = fields.toArray(new String[fields.size()]);
if (storedFields != null) {
aFields = storedFields.toArray(new String[storedFields.size()]);
} else {
aFields = defaultFields;
}
items.add(new Item(index, type, id).routing(routing).fields(aFields).parent(parent).version(version).versionType(versionType)
items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version).versionType(versionType)
.fetchSourceContext(fetchSourceContext == null ? defaultFetchSource : fetchSourceContext));
}
}
@ -491,7 +477,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
if (!token.isValue()) {
throw new IllegalArgumentException("ids array element should only contain ids");
}
items.add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
}
}
@ -510,7 +496,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
preference = in.readOptionalString();
refresh = in.readBoolean();
realtime = in.readBoolean();
ignoreErrorsOnGeneratedFields = in.readBoolean();
int size = in.readVInt();
items = new ArrayList<>(size);
@ -525,7 +510,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
out.writeOptionalString(preference);
out.writeBoolean(refresh);
out.writeBoolean(realtime);
out.writeBoolean(ignoreErrorsOnGeneratedFields);
out.writeVInt(items.size());
for (Item item : items) {

View File

@ -80,9 +80,4 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest
request.realtime(realtime);
return this;
}
public MultiGetRequestBuilder setIgnoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
return this;
}
}

View File

@ -35,7 +35,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
private String preference;
boolean realtime = true;
boolean refresh;
boolean ignoreErrorsOnGeneratedFields = false;
IntArrayList locations;
List<MultiGetRequest.Item> items;
@ -52,7 +51,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
preference = multiGetRequest.preference;
realtime = multiGetRequest.realtime;
refresh = multiGetRequest.refresh;
ignoreErrorsOnGeneratedFields = multiGetRequest.ignoreErrorsOnGeneratedFields;
}
@Override
@ -87,11 +85,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
return this;
}
public MultiGetShardRequest ignoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
return this;
}
public boolean refresh() {
return this.refresh;
}
@ -130,7 +123,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
preference = in.readOptionalString();
refresh = in.readBoolean();
realtime = in.readBoolean();
ignoreErrorsOnGeneratedFields = in.readBoolean();
}
@Override
@ -146,11 +138,5 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
out.writeOptionalString(preference);
out.writeBoolean(refresh);
out.writeBoolean(realtime);
out.writeBoolean(ignoreErrorsOnGeneratedFields);
}
public boolean ignoreErrorsOnGeneratedFields() {
return ignoreErrorsOnGeneratedFields;
}
}

View File

@ -92,8 +92,8 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
indexShard.refresh("refresh_flag_get");
}
GetResult result = indexShard.getService().get(request.type(), request.id(), request.fields(),
request.realtime(), request.version(), request.versionType(), request.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
GetResult result = indexShard.getService().get(request.type(), request.id(), request.storedFields(),
request.realtime(), request.version(), request.versionType(), request.fetchSourceContext());
return new GetResponse(result);
}
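The get path drops the ignoreErrorsOnGeneratedFields flag and picks up the same fields-to-storedFields rename. A hedged sketch of the client-side request, assuming GetRequest#storedFields(String...) mirrors the rename (index, type, id, and field names are placeholders):

import org.elasticsearch.action.get.GetRequest;

class GetStoredFieldsExample {
    static GetRequest build() {
        // one fewer argument reaches the shard: the generated-fields flag is gone
        return new GetRequest("my-index", "my-type", "1").storedFields("field1");
    }
}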

View File

@ -88,13 +88,15 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
for (int i = 0; i < request.locations.size(); i++) {
MultiGetRequest.Item item = request.items.get(i);
try {
GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(), item.versionType(), item.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.storedFields(), request.realtime(), item.version(),
item.versionType(), item.fetchSourceContext());
response.add(request.locations.get(i), new GetResponse(getResult));
} catch (Exception e) {
if (TransportActions.isShardNotAvailableException(e)) {
throw (ElasticsearchException) e;
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
item.type(), item.id()), e);
response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
}
}

View File

@ -35,16 +35,18 @@ public class MainResponse extends ActionResponse implements ToXContent {
private String nodeName;
private Version version;
private ClusterName clusterName;
private String clusterUuid;
private Build build;
private boolean available;
MainResponse() {
}
public MainResponse(String nodeName, Version version, ClusterName clusterName, Build build, boolean available) {
public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build, boolean available) {
this.nodeName = nodeName;
this.version = version;
this.clusterName = clusterName;
this.clusterUuid = clusterUuid;
this.build = build;
this.available = available;
}
@ -61,6 +63,10 @@ public class MainResponse extends ActionResponse implements ToXContent {
return clusterName;
}
public String getClusterUuid() {
return clusterUuid;
}
public Build getBuild() {
return build;
}
@ -75,6 +81,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
out.writeString(nodeName);
Version.writeVersion(version, out);
clusterName.writeTo(out);
out.writeString(clusterUuid);
Build.writeBuild(build, out);
out.writeBoolean(available);
}
@ -85,6 +92,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
nodeName = in.readString();
version = Version.readVersion(in);
clusterName = new ClusterName(in);
clusterUuid = in.readString();
build = Build.readBuild(in);
available = in.readBoolean();
}
@ -94,6 +102,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
builder.startObject();
builder.field("name", nodeName);
builder.field("cluster_name", clusterName.value());
builder.field("cluster_uuid", clusterUuid);
builder.startObject("version")
.field("number", version.toString())
.field("build_hash", build.shortHash())

View File

@ -52,7 +52,7 @@ public class TransportMainAction extends HandledTransportAction<MainRequest, Mai
assert Node.NODE_NAME_SETTING.exists(settings);
final boolean available = clusterState.getBlocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE) == false;
listener.onResponse(
new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(), Build.CURRENT,
available));
new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(),
clusterState.metaData().clusterUUID(), Build.CURRENT, available));
}
}

View File

@ -40,8 +40,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;

View File

@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;

View File

@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.controller;
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.ObjectObjectHashMap;
@ -89,8 +89,7 @@ public class SearchPhaseController extends AbstractComponent {
private final ScriptService scriptService;
private final ClusterService clusterService;
@Inject
public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
super(settings);
this.bigArrays = bigArrays;
this.scriptService = scriptService;

View File

@ -25,8 +25,6 @@ import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;

View File

@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;

View File

@ -28,8 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;

View File

@ -29,8 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;

View File

@ -17,17 +17,15 @@
* under the License.
*/
package org.elasticsearch.search.action;
package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
@ -45,9 +43,7 @@ import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.query.ScrollQuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -73,37 +69,10 @@ public class SearchTransportService extends AbstractComponent {
public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]";
private final TransportService transportService;
private final SearchService searchService;
@Inject
public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
SearchTransportService(Settings settings, TransportService transportService) {
super(settings);
this.transportService = transportService;
this.searchService = searchService;
transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
new FreeContextTransportHandler<>());
transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
new FreeContextTransportHandler<>());
transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME,
new ClearScrollContextsTransportHandler());
transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
new SearchDfsTransportHandler());
transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryTransportHandler());
transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryByIdTransportHandler());
transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryScrollTransportHandler());
transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryFetchTransportHandler());
transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryQueryFetchTransportHandler());
transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryFetchScrollTransportHandler());
transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
new FetchByIdTransportHandler<>());
transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
new FetchByIdTransportHandler<>());
}
public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) {
@ -127,8 +96,8 @@ public class SearchTransportService extends AbstractComponent {
}
public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener<TransportResponse> listener) {
transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(),
new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE,
new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
}
public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request,
@ -281,87 +250,66 @@ public class SearchTransportService extends AbstractComponent {
}
}
class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest>
implements TransportRequestHandler<FreeContextRequest> {
@Override
public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception {
boolean freed = searchService.freeContext(request.id());
channel.sendResponse(new SearchFreeContextResponse(freed));
}
}
static class ClearScrollContextsRequest extends TransportRequest {
}
class ClearScrollContextsTransportHandler implements TransportRequestHandler<ClearScrollContextsRequest> {
@Override
public void messageReceived(ClearScrollContextsRequest request, TransportChannel channel) throws Exception {
searchService.freeAllScrollContexts();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class SearchDfsTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
DfsSearchResult result = searchService.executeDfsPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
QuerySearchResultProvider result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryByIdTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
@Override
public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
QuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
@Override
public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryFetchTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryQueryFetchTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
@Override
public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
}
class FetchByIdTransportHandler<Request extends ShardFetchRequest> implements TransportRequestHandler<Request> {
@Override
public void messageReceived(Request request, TransportChannel channel) throws Exception {
FetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryFetchScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
@Override
public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
public static void registerRequestHandler(TransportService transportService, SearchService searchService) {
transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
((request, channel) -> {
boolean freed = searchService.freeContext(request.id());
channel.sendResponse(new SearchFreeContextResponse(freed));
}));
transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
(request, channel) -> {
boolean freed = searchService.freeContext(request.id());
channel.sendResponse(new SearchFreeContextResponse(freed));
});
transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE,
ThreadPool.Names.SAME, (request, channel) -> {
searchService.freeAllScrollContexts();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
});
transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
DfsSearchResult result = searchService.executeDfsPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QuerySearchResultProvider result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
FetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
FetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
}
}
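The dedicated TransportRequestHandler inner classes collapse into lambdas because messageReceived is the handler's single abstract method. A sketch of the registration shape with hypothetical request/response types:

import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

// EchoRequest and EchoResponse are hypothetical, just enough to show the lambda form
class EchoRequest extends TransportRequest {
}

class EchoResponse extends TransportResponse {
}

class EchoHandlerRegistration {
    static void register(TransportService transportService) {
        transportService.registerRequestHandler("internal:example/echo", EchoRequest::new, ThreadPool.Names.SAME,
                (request, channel) -> channel.sendResponse(new EchoResponse()));
    }
}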

View File

@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -44,8 +43,6 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;
/**
*/
public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {
private final ClusterService clusterService;
@ -53,11 +50,11 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
@Inject
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ClusterService clusterService, SearchTransportService searchTransportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;
this.searchTransportService = new SearchTransportService(settings, transportService);
}
@Override

View File

@ -29,10 +29,11 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -53,13 +54,14 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private final SearchPhaseController searchPhaseController;
@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
TransportService transportService, SearchTransportService searchTransportService,
public TransportSearchAction(Settings settings, ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService,
TransportService transportService, SearchService searchService,
ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
indexNameExpressionResolver) {
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
this.searchPhaseController = searchPhaseController;
this.searchTransportService = searchTransportService;
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
this.searchTransportService = new SearchTransportService(settings, transportService);
SearchTransportService.registerRequestHandler(transportService, searchService);
this.clusterService = clusterService;
}

View File

@ -26,8 +26,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -45,15 +45,14 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
private final SearchPhaseController searchPhaseController;
@Inject
public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, SearchTransportService searchTransportService,
SearchPhaseController searchPhaseController,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
public TransportSearchScrollAction(Settings settings, BigArrays bigArrays, ThreadPool threadPool, ScriptService scriptService,
TransportService transportService, ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
SearchScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;
this.searchPhaseController = searchPhaseController;
this.searchTransportService = new SearchTransportService(settings, transportService);
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
}
@Override

View File

@ -180,7 +180,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
super(item.index());
this.id = item.id();
this.type = item.type();
this.selectedFields(item.fields());
this.selectedFields(item.storedFields());
this.routing(item.routing());
this.parent(item.parent());
}

View File

@ -186,7 +186,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
@Override
public void onResponse(IndexResponse response) {
UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
if (request.fields() != null && request.fields().length > 0) {
if ((request.fetchSource() != null && request.fetchSource().fetchSource()) ||
(request.fields() != null && request.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
} else {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.update;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
@ -28,9 +29,11 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@ -51,6 +54,7 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.lookup.SourceLookup;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -76,7 +80,7 @@ public class UpdateHelper extends AbstractComponent {
public Result prepare(UpdateRequest request, IndexShard indexShard) {
final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);
true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
return prepare(indexShard.shardId(), request, getResult);
}
@ -267,17 +271,19 @@ public class UpdateHelper extends AbstractComponent {
}
/**
* Extracts the fields from the updated document to be returned in a update response
* Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in an update response.
* For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in an update response
*/
public GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, final Map<String, Object> source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) {
if (request.fields() == null || request.fields().length == 0) {
if ((request.fields() == null || request.fields().length == 0) &&
(request.fetchSource() == null || request.fetchSource().fetchSource() == false)) {
return null;
}
SourceLookup sourceLookup = new SourceLookup();
sourceLookup.setSource(source);
boolean sourceRequested = false;
Map<String, GetField> fields = null;
if (request.fields() != null && request.fields().length > 0) {
SourceLookup sourceLookup = new SourceLookup();
sourceLookup.setSource(source);
for (String field : request.fields()) {
if (field.equals("_source")) {
sourceRequested = true;
@ -298,8 +304,26 @@ public class UpdateHelper extends AbstractComponent {
}
}
BytesReference sourceFilteredAsBytes = sourceAsBytes;
if (request.fetchSource() != null && request.fetchSource().fetchSource()) {
sourceRequested = true;
if (request.fetchSource().includes().length > 0 || request.fetchSource().excludes().length > 0) {
Object value = sourceLookup.filter(request.fetchSource().includes(), request.fetchSource().excludes());
try {
final int initialCapacity = Math.min(1024, sourceAsBytes.length());
BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
try (XContentBuilder builder = new XContentBuilder(sourceContentType.xContent(), streamOutput)) {
builder.value(value);
sourceFilteredAsBytes = builder.bytes();
}
} catch (IOException e) {
throw new ElasticsearchException("Error filtering source", e);
}
}
}
// TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType)
return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceAsBytes : null, fields);
return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceFilteredAsBytes : null, fields);
}
public static class Result {
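The include/exclude handling above leans on SourceLookup#filter, the same call used in the hunk. A hedged, standalone sketch of that filtering step (map contents are illustrative):

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.search.lookup.SourceLookup;

class SourceFilterExample {
    static Object filtered() {
        Map<String, Object> source = new HashMap<>();
        source.put("counter", 1);
        source.put("password", "secret");
        SourceLookup sourceLookup = new SourceLookup();
        sourceLookup.setSource(source);
        // keep everything except the excluded key, as fetchSource includes/excludes would
        return sourceLookup.filter(new String[]{"*"}, new String[]{"password"});
    }
}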

View File

@ -32,6 +32,8 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@ -42,6 +44,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.Collections;
@ -55,6 +58,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
*/
public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
implements DocumentRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(UpdateRequest.class));
private String type;
private String id;
@ -68,6 +73,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
Script script;
private String[] fields;
private FetchSourceContext fetchSourceContext;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
@ -373,17 +379,80 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
/**
* Explicitly specify the fields that will be returned. By default, nothing is returned.
* @deprecated Use {@link UpdateRequest#fetchSource(String[], String[])} instead
*/
@Deprecated
public UpdateRequest fields(String... fields) {
this.fields = fields;
return this;
}
/**
* Get the fields to be returned.
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include
* An optional include (optionally wildcarded) pattern to filter
* the returned _source
* @param exclude
* An optional exclude (optionally wildcarded) pattern to filter
* the returned _source
*/
public UpdateRequest fetchSource(@Nullable String include, @Nullable String exclude) {
this.fetchSourceContext = new FetchSourceContext(include, exclude);
return this;
}
/**
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes
* An optional list of include (optionally wildcarded) patterns to
* filter the returned _source
* @param excludes
* An optional list of exclude (optionally wildcarded) patterns to
* filter the returned _source
*/
public UpdateRequest fetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
this.fetchSourceContext = new FetchSourceContext(includes, excludes);
return this;
}
/**
* Indicates whether the response should contain the updated _source.
*/
public UpdateRequest fetchSource(boolean fetchSource) {
this.fetchSourceContext = new FetchSourceContext(fetchSource);
return this;
}
/**
* Explicitly set the fetch source context for this request
*/
public UpdateRequest fetchSource(FetchSourceContext context) {
this.fetchSourceContext = context;
return this;
}
/**
* Get the fields to be returned.
* @deprecated Use {@link UpdateRequest#fetchSource()} instead
*/
@Deprecated
public String[] fields() {
return this.fields;
return fields;
}
/**
* Gets the {@link FetchSourceContext} which defines how the _source should
* be fetched.
*/
public FetchSourceContext fetchSource() {
return fetchSourceContext;
}
/**
@ -618,16 +687,16 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return upsertRequest;
}
public UpdateRequest source(XContentBuilder source) throws Exception {
return source(source.bytes());
public UpdateRequest fromXContent(XContentBuilder source) throws Exception {
return fromXContent(source.bytes());
}
public UpdateRequest source(byte[] source) throws Exception {
return source(source, 0, source.length);
public UpdateRequest fromXContent(byte[] source) throws Exception {
return fromXContent(source, 0, source.length);
}
public UpdateRequest source(byte[] source, int offset, int length) throws Exception {
return source(new BytesArray(source, offset, length));
public UpdateRequest fromXContent(byte[] source, int offset, int length) throws Exception {
return fromXContent(new BytesArray(source, offset, length));
}
/**
@ -646,7 +715,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return detectNoop;
}
public UpdateRequest source(BytesReference source) throws Exception {
public UpdateRequest fromXContent(BytesReference source) throws Exception {
Script script = null;
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
XContentParser.Token token = parser.nextToken();
@ -685,6 +754,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
if (fields != null) {
fields(fields.toArray(new String[fields.size()]));
}
} else if ("_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
}
}
if (script != null) {
@ -729,13 +800,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
doc = new IndexRequest();
doc.readFrom(in);
}
int size = in.readInt();
if (size >= 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
fields = in.readOptionalStringArray();
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
if (in.readBoolean()) {
upsertRequest = new IndexRequest();
upsertRequest.readFrom(in);
@ -772,14 +838,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
doc.id(id);
doc.writeTo(out);
}
if (fields == null) {
out.writeInt(-1);
} else {
out.writeInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
out.writeOptionalStringArray(fields);
out.writeOptionalWriteable(fetchSourceContext);
if (upsertRequest == null) {
out.writeBoolean(false);
} else {
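Taken together, these UpdateRequest changes deprecate fields(...) in favour of fetchSource(...) and rename the source(...) parsers to fromXContent(...). A minimal usage sketch of the new accessor (index, type, id, and patterns are placeholders):

import org.elasticsearch.action.update.UpdateRequest;

class UpdateFetchSourceExample {
    static UpdateRequest build() {
        // ask for the filtered _source back instead of the deprecated fields(...)
        return new UpdateRequest("my-index", "my-type", "1")
                .fetchSource(new String[]{"counter", "tags.*"}, new String[]{"tags.internal"});
    }
}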

View File

@ -25,17 +25,22 @@ import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.action.document.RestUpdateAction;
import org.elasticsearch.script.Script;
import java.util.Map;
public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder>
implements WriteRequestBuilder<UpdateRequestBuilder> {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));
public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) {
super(client, action, new UpdateRequest());
@ -90,12 +95,57 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
/**
* Explicitly specify the fields that will be returned. By default, nothing is returned.
* @deprecated Use {@link UpdateRequestBuilder#setFetchSource(String[], String[])} instead
*/
@Deprecated
public UpdateRequestBuilder setFields(String... fields) {
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
request.fields(fields);
return this;
}
/**
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include
* An optional include (optionally wildcarded) pattern to filter
* the returned _source
* @param exclude
* An optional exclude (optionally wildcarded) pattern to filter
* the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
request.fetchSource(include, exclude);
return this;
}
/**
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes
* An optional list of include (optionally wildcarded) patterns to
* filter the returned _source
* @param excludes
* An optional list of exclude (optionally wildcarded) patterns to
* filter the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
request.fetchSource(includes, excludes);
return this;
}
/**
* Indicates whether the response should contain the updated _source.
*/
public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
request.fetchSource(fetchSource);
return this;
}
/**
* Sets the number of retries of a version conflict occurs because the document was updated between
* getting it and updating it. Defaults to 0.
@ -279,26 +329,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
public UpdateRequestBuilder setSource(XContentBuilder source) throws Exception {
request.source(source);
return this;
}
public UpdateRequestBuilder setSource(byte[] source) throws Exception {
request.source(source);
return this;
}
public UpdateRequestBuilder setSource(byte[] source, int offset, int length) throws Exception {
request.source(source, offset, length);
return this;
}
public UpdateRequestBuilder setSource(BytesReference source) throws Exception {
request.source(source);
return this;
}
/**
* Sets whether the specified doc parameter should be used as upsert document.
*/
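The builder exposes the same API as setFetchSource. A sketch against an assumed org.elasticsearch.client.Client instance (a doc or script would be set as usual before executing):

import org.elasticsearch.client.Client;

class UpdateBuilderExample {
    static void update(Client client) {
        client.prepareUpdate("my-index", "my-type", "1")
                .setFetchSource("counter", null) // include "counter", no exclude pattern
                .get();
    }
}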

View File

@ -28,6 +28,7 @@ import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
@ -227,13 +228,13 @@ final class Bootstrap {
}
/**
* This method is invoked by {@link Elasticsearch#main(String[])}
* to startup elasticsearch.
* This method is invoked by {@link Elasticsearch#main(String[])} to start up Elasticsearch.
*/
static void init(
final boolean foreground,
final Path pidFile,
final Map<String, String> esSettings) throws BootstrapException, NodeValidationException {
final boolean quiet,
final Map<String, String> esSettings) throws BootstrapException, NodeValidationException, UserException {
// Set the system property before anything has a chance to trigger its use
initLoggerPrefix();
@ -259,8 +260,9 @@ final class Bootstrap {
}
}
final boolean closeStandardStreams = (foreground == false) || quiet;
try {
if (!foreground) {
if (closeStandardStreams) {
final Logger rootLogger = ESLoggerFactory.getRootLogger();
final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
if (maybeConsoleAppender != null) {
@ -285,7 +287,7 @@ final class Bootstrap {
INSTANCE.start();
if (!foreground) {
if (closeStandardStreams) {
closeSysError();
}
} catch (NodeValidationException | RuntimeException e) {

View File

@ -26,7 +26,7 @@ import java.util.Map;
* Wrapper exception for checked exceptions thrown during the bootstrap process. Methods invoked
* during bootstrap should explicitly declare the checked exceptions that they can throw, rather
* than declaring the top-level checked exception {@link Exception}. This exception exists to wrap
* these checked exceptions so that {@link Bootstrap#init(boolean, Path, Map)} does not have to
* these checked exceptions so that {@link Bootstrap#init(boolean, Path, boolean, Map)} does not have to
* declare all of these checked exceptions.
*/
class BootstrapException extends Exception {

View File

@ -44,6 +44,7 @@ class Elasticsearch extends SettingCommand {
private final OptionSpecBuilder versionOption;
private final OptionSpecBuilder daemonizeOption;
private final OptionSpec<Path> pidfileOption;
private final OptionSpecBuilder quietOption;
// visible for testing
Elasticsearch() {
@ -58,6 +59,10 @@ class Elasticsearch extends SettingCommand {
.availableUnless(versionOption)
.withRequiredArg()
.withValuesConvertedBy(new PathConverter());
quietOption = parser.acceptsAll(Arrays.asList("q", "quiet"),
"Turns off standard ouput/error streams logging in console")
.availableUnless(versionOption)
.availableUnless(daemonizeOption);
}
/**
@ -92,17 +97,19 @@ class Elasticsearch extends SettingCommand {
final boolean daemonize = options.has(daemonizeOption);
final Path pidFile = pidfileOption.value(options);
final boolean quiet = options.has(quietOption);
try {
init(daemonize, pidFile, settings);
init(daemonize, pidFile, quiet, settings);
} catch (NodeValidationException e) {
throw new UserException(ExitCodes.CONFIG, e.getMessage());
}
}
void init(final boolean daemonize, final Path pidFile, final Map<String, String> esSettings) throws NodeValidationException {
void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map<String, String> esSettings)
throws NodeValidationException, UserException {
try {
Bootstrap.init(!daemonize, pidFile, esSettings);
Bootstrap.init(!daemonize, pidFile, quiet, esSettings);
} catch (BootstrapException | RuntimeException e) {
// format exceptions to the console in a special way
// to avoid 2MB stacktraces from guice, etc.
@ -116,7 +123,8 @@ class Elasticsearch extends SettingCommand {
*
* http://commons.apache.org/proper/commons-daemon/procrun.html
*
* NOTE: If this method is renamed and/or moved, make sure to update service.bat!
* NOTE: If this method is renamed and/or moved, make sure to
* update elasticsearch-service.bat!
*/
static void close(String[] args) throws IOException {
Bootstrap.stop();
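Because quietOption is declared availableUnless the daemonize option, -q and -d are rejected together at parse time. A standalone jopt-simple sketch of that constraint (option descriptions shortened and illustrative):

import java.util.Arrays;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpecBuilder;

class QuietOptionExample {
    static void demo() {
        OptionParser parser = new OptionParser();
        OptionSpecBuilder daemonize = parser.acceptsAll(Arrays.asList("d", "daemonize"), "run in the background");
        OptionSpecBuilder quiet = parser.acceptsAll(Arrays.asList("q", "quiet"), "suppress console logging")
                .availableUnless(daemonize);
        OptionSet options = parser.parse("-q"); // parser.parse("-d", "-q") would throw instead
        assert options.has(quiet);
    }
}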

View File

@ -257,11 +257,6 @@ final class Security {
for (Path path : environment.dataFiles()) {
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
// TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder
// https://github.com/elastic/elasticsearch/issues/20391
for (Path path : environment.dataWithClusterFiles()) {
addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
for (Path path : environment.repoFiles()) {
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
}

View File

@ -136,7 +136,6 @@ public abstract class TransportClient extends AbstractClient {
}
modules.add(networkModule);
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
modules.add(searchModule);
ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class));
modules.add(actionModule);

View File

@ -22,7 +22,6 @@ package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -37,7 +36,6 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
@ -629,12 +627,6 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
return nodes;
}
public Builder routingResult(RoutingAllocation.Result routingResult) {
this.routingTable = routingResult.routingTable();
this.metaData = routingResult.metaData();
return this;
}
public Builder routingTable(RoutingTable routingTable) {
this.routingTable = routingTable;
return this;
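Removing Builder#routingResult goes hand in hand with AllocationService#reroute returning the updated ClusterState directly; judging by the call sites below, it hands back the input state when nothing changed. A sketch of the new call shape:

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

class RerouteExample {
    static ClusterState reroute(AllocationService allocationService, ClusterState currentState) {
        // before: Result result = allocationService.reroute(currentState, reason);
        //         currentState = ClusterState.builder(currentState).routingResult(result).build();
        return allocationService.reroute(currentState, "example reason");
    }
}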

View File

@ -38,7 +38,6 @@ import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
@ -311,10 +310,7 @@ public class ShardStateAction extends AbstractComponent {
ClusterState maybeUpdatedState = currentState;
try {
RoutingAllocation.Result result = applyFailedShards(currentState, shardRoutingsToBeApplied, staleShardsToBeApplied);
if (result.changed()) {
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
}
maybeUpdatedState = applyFailedShards(currentState, shardRoutingsToBeApplied, staleShardsToBeApplied);
batchResultBuilder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e);
@ -327,7 +323,7 @@ public class ShardStateAction extends AbstractComponent {
}
// visible for testing
RoutingAllocation.Result applyFailedShards(ClusterState currentState, List<FailedRerouteAllocation.FailedShard> failedShards,
ClusterState applyFailedShards(ClusterState currentState, List<FailedRerouteAllocation.FailedShard> failedShards,
List<FailedRerouteAllocation.StaleShard> staleShards) {
return allocationService.applyFailedShards(currentState, failedShards, staleShards);
}
@ -426,11 +422,7 @@ public class ShardStateAction extends AbstractComponent {
ClusterState maybeUpdatedState = currentState;
try {
RoutingAllocation.Result result =
allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
if (result.changed()) {
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
}
maybeUpdatedState = allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
builder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);

View File

@ -46,7 +46,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
@ -430,10 +429,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
if (request.state() == State.OPEN) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable())
.addAsNew(updatedState.metaData().index(request.index()));
RoutingAllocation.Result routingResult = allocationService.reroute(
updatedState = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
"index [" + request.index() + "] created");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
}
removalReason = "cleaning up after validating index on master";
return updatedState;
@ -500,15 +498,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
}
}
//norelease - this can be removed?
Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
if (number_of_primaries != null && number_of_primaries <= 0) {
validationErrors.add("index must have 1 or more primary shards");
}
if (number_of_replicas != null && number_of_replicas < 0) {
validationErrors.add("index must have 0 or more replica shards");
}
return validationErrors;
}

View File

@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
@ -108,9 +107,12 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
MetaData newMetaData = metaDataBuilder.build();
ClusterBlocks blocks = clusterBlocksBuilder.build();
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(currentState).routingTable(routingTableBuilder.build()).metaData(newMetaData).build(),
return allocationService.reroute(
ClusterState.builder(currentState)
.routingTable(routingTableBuilder.build())
.metaData(newMetaData)
.blocks(blocks)
.build(),
"deleted indices [" + indices + "]");
return ClusterState.builder(currentState).routingResult(routingResult).metaData(newMetaData).blocks(blocks).build();
}
}

View File

@ -31,7 +31,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
@ -125,11 +124,10 @@ public class MetaDataIndexStateService extends AbstractComponent {
rtBuilder.remove(index.getIndex().getName());
}
RoutingAllocation.Result routingResult = allocationService.reroute(
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return allocationService.reroute(
ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
"indices closed [" + indicesAsString + "]");
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
});
}
@ -188,11 +186,10 @@ public class MetaDataIndexStateService extends AbstractComponent {
rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex()));
}
RoutingAllocation.Result routingResult = allocationService.reroute(
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return allocationService.reroute(
ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
"indices opened [" + indicesAsString + "]");
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
});
}

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
@ -63,15 +64,21 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
private final IndicesService indicesService;
private final MetaDataCreateIndexService metaDataCreateIndexService;
private final NodeServicesProvider nodeServicesProvider;
private final IndexScopedSettings indexScopedSettings;
@Inject
public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService,
MetaDataCreateIndexService metaDataCreateIndexService,
AliasValidator aliasValidator, IndicesService indicesService,
NodeServicesProvider nodeServicesProvider,
IndexScopedSettings indexScopedSettings) {
super(settings);
this.clusterService = clusterService;
this.aliasValidator = aliasValidator;
this.indicesService = indicesService;
this.metaDataCreateIndexService = metaDataCreateIndexService;
this.nodeServicesProvider = nodeServicesProvider;
this.indexScopedSettings = indexScopedSettings;
}
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
@ -260,6 +267,14 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
validationErrors.add("template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
try {
indexScopedSettings.validate(request.settings);
} catch (IllegalArgumentException iae) {
validationErrors.add(iae.getMessage());
for (Throwable t : iae.getSuppressed()) {
validationErrors.add(t.getMessage());
}
}
List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
validationErrors.addAll(indexSettingsValidation);
if (!validationErrors.isEmpty()) {

View File

@ -100,16 +100,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
* Returns true if this index can be supported by the current version of elasticsearch
*/
private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
// The index was created with elasticsearch that was using Lucene 5.2.1
return true;
}
if (indexMetaData.getMinimumCompatibleVersion() != null &&
indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_5_0_0)) {
//The index was upgraded we can work with it
return true;
}
return false;
return indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1);
}
/**

View File

@ -33,7 +33,6 @@ import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
@ -271,8 +270,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder).routingTable(routingTableBuilder.build()).blocks(blocks).build();
// now, reroute in case things change that require it (like number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
updatedState = allocationService.reroute(updatedState, "settings update");
try {
for (Index index : openIndices) {
final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index);

View File

@ -96,7 +96,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
* @param version the version of the node
*/
public DiscoveryNode(final String id, TransportAddress address, Version version) {
this(id, address, Collections.emptyMap(), Collections.emptySet(), version);
this(id, address, Collections.emptyMap(), EnumSet.allOf(Role.class), version);
}
/**

View File

@ -106,12 +106,7 @@ public class DelayedAllocationService extends AbstractLifecycleComponent impleme
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
removeIfSameTask(this);
RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "assign delayed unassigned shards");
if (routingResult.changed()) {
return ClusterState.builder(currentState).routingResult(routingResult).build();
} else {
return currentState;
}
return allocationService.reroute(currentState, "assign delayed unassigned shards");
}
@Override

View File

@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@ -96,12 +95,7 @@ public class RoutingService extends AbstractLifecycleComponent {
@Override
public ClusterState execute(ClusterState currentState) {
rerouting.set(false);
RoutingAllocation.Result routingResult = allocationService.reroute(currentState, reason);
if (!routingResult.changed()) {
// no state changed
return currentState;
}
return ClusterState.builder(currentState).routingResult(routingResult).build();
return allocationService.reroute(currentState, reason);
}
@Override

View File

@ -32,7 +32,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.Result;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@ -79,15 +78,15 @@ public class AllocationService extends AbstractComponent {
* Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be
* provided as parameter and no duplicates should be contained.
* <p>
* If the same instance of the routing table is returned, then no change has been made.</p>
* If the same instance of the {@link ClusterState} is returned, then no change has been made.</p>
*/
public Result applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
return applyStartedShards(clusterState, startedShards, true);
}
public Result applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards, boolean withReroute) {
public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards, boolean withReroute) {
if (startedShards.isEmpty()) {
return Result.unchanged(clusterState);
return clusterState;
}
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
@ -100,35 +99,35 @@ public class AllocationService extends AbstractComponent {
reroute(allocation);
}
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
return buildResultAndLogHealthChange(allocation, "shards started [" + startedShardsAsString + "] ...");
return buildResultAndLogHealthChange(clusterState, allocation, "shards started [" + startedShardsAsString + "] ...");
}
protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) {
return buildResultAndLogHealthChange(allocation, reason, new RoutingExplanations());
protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) {
return buildResultAndLogHealthChange(oldState, allocation, reason, new RoutingExplanations());
}
protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) {
RoutingTable oldRoutingTable = allocation.routingTable();
protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason,
RoutingExplanations explanations) {
RoutingTable oldRoutingTable = oldState.routingTable();
RoutingNodes newRoutingNodes = allocation.routingNodes();
final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build();
MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable);
assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata
final ClusterState newState = ClusterState.builder(oldState).routingTable(newRoutingTable).metaData(newMetaData).build();
logClusterHealthStateChange(
new ClusterStateHealth(ClusterState.builder(clusterName).
metaData(allocation.metaData()).routingTable(oldRoutingTable).build()),
new ClusterStateHealth(ClusterState.builder(clusterName).
metaData(newMetaData).routingTable(newRoutingTable).build()),
new ClusterStateHealth(oldState),
new ClusterStateHealth(newState),
reason
);
return Result.changed(newRoutingTable, newMetaData, explanations);
return newState;
}
public Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
public ClusterState applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)),
Collections.emptyList());
}
public Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
public ClusterState applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
return applyFailedShards(clusterState, failedShards, Collections.emptyList());
}
@ -138,20 +137,20 @@ public class AllocationService extends AbstractComponent {
* are no routing entries in the routing table.
*
* <p>
* If the same instance of the routing table is returned, then no change has been made.</p>
* If the same instance of ClusterState is returned, then no change has been made.</p>
*/
public Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards,
List<FailedRerouteAllocation.StaleShard> staleShards) {
public ClusterState applyFailedShards(final ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards,
List<FailedRerouteAllocation.StaleShard> staleShards) {
if (staleShards.isEmpty() && failedShards.isEmpty()) {
return Result.unchanged(clusterState);
return clusterState;
}
clusterState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards);
ClusterState tmpState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards);
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
RoutingNodes routingNodes = getMutableRoutingNodes(tmpState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
long currentNanoTime = currentNanoTime();
FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards,
FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, tmpState, failedShards,
clusterInfoService.getClusterInfo(), currentNanoTime);
for (FailedRerouteAllocation.FailedShard failedShardEntry : failedShards) {
@ -178,14 +177,14 @@ public class AllocationService extends AbstractComponent {
reroute(allocation);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.routingEntry.shardId().toString());
return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
return buildResultAndLogHealthChange(clusterState, allocation, "shards failed [" + failedShardsAsString + "] ...");
}
/**
* Unassigns any shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas
* if needed.
*/
public RoutingAllocation.Result deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
public ClusterState deassociateDeadNodes(final ClusterState clusterState, boolean reroute, String reason) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
@ -200,9 +199,9 @@ public class AllocationService extends AbstractComponent {
}
if (allocation.routingNodesChanged() == false) {
return Result.unchanged(clusterState);
return clusterState;
}
return buildResultAndLogHealthChange(allocation, reason);
return buildResultAndLogHealthChange(clusterState, allocation, reason);
}
/**
@ -244,7 +243,7 @@ public class AllocationService extends AbstractComponent {
.collect(Collectors.joining(", "));
}
public Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
public CommandsResult reroute(final ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// we don't shuffle the unassigned shards here, to try and get as close as possible to
// a consistent result of the effect the commands have on the routing
@ -261,25 +260,25 @@ public class AllocationService extends AbstractComponent {
// the assumption is that commands will move / act on shards (or fail through exceptions)
// so, there will always be shard "movements", so no need to check on reroute
reroute(allocation);
return buildResultAndLogHealthChange(allocation, "reroute commands", explanations);
return new CommandsResult(explanations, buildResultAndLogHealthChange(clusterState, allocation, "reroute commands"));
}
/**
* Reroutes the routing table based on the live nodes.
* <p>
* If the same instance of the routing table is returned, then no change has been made.
* If the same instance of ClusterState is returned, then no change has been made.
*/
public Result reroute(ClusterState clusterState, String reason) {
public ClusterState reroute(ClusterState clusterState, String reason) {
return reroute(clusterState, reason, false);
}
/**
* Reroutes the routing table based on the live nodes.
* <p>
* If the same instance of the routing table is returned, then no change has been made.
* If the same instance of ClusterState is returned, then no change has been made.
*/
protected Result reroute(ClusterState clusterState, String reason, boolean debug) {
protected ClusterState reroute(final ClusterState clusterState, String reason, boolean debug) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
@ -288,9 +287,9 @@ public class AllocationService extends AbstractComponent {
allocation.debugDecision(debug);
reroute(allocation);
if (allocation.routingNodesChanged() == false) {
return Result.unchanged(clusterState);
return clusterState;
}
return buildResultAndLogHealthChange(allocation, reason);
return buildResultAndLogHealthChange(clusterState, allocation, reason);
}
private void logClusterHealthStateChange(ClusterStateHealth previousStateHealth, ClusterStateHealth newStateHealth, String reason) {
@ -368,4 +367,39 @@ public class AllocationService extends AbstractComponent {
protected long currentNanoTime() {
return System.nanoTime();
}
/**
* This class describes the result of applying a set of
* {@link org.elasticsearch.cluster.routing.allocation.command.AllocationCommand}
*/
public static class CommandsResult {
private final RoutingExplanations explanations;
private final ClusterState clusterState;
/**
* Creates a new {@link CommandsResult}
* @param explanations Explanation for the reroute actions
* @param clusterState Resulting cluster state
*/
private CommandsResult(RoutingExplanations explanations, ClusterState clusterState) {
this.clusterState = clusterState;
this.explanations = explanations;
}
/**
* Get the explanation of this result
*/
public RoutingExplanations explanations() {
return explanations;
}
/**
* The resulting cluster state, after the commands were applied
*/
public ClusterState getClusterState() {
return clusterState;
}
}
}
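The refactor above removes the RoutingAllocation.Result round trip: reroute and the apply* methods now return the (possibly unchanged) ClusterState directly. A minimal sketch of the new calling convention, using only names that appear in this diff; it compiles only against the Elasticsearch source at this commit:
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

class RerouteCallerSketch {
    ClusterState execute(AllocationService allocationService, ClusterState currentState) {
        // reroute returns the resulting ClusterState; when nothing changed it is
        // the same instance, so callers need neither a changed() check nor a
        // ClusterState.builder(...).routingResult(...) rebuild.
        return allocationService.reroute(currentState, "example reason");
    }
}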

View File

@ -46,83 +46,6 @@ import static java.util.Collections.unmodifiableSet;
*/
public class RoutingAllocation {
/**
* this class is used to describe results of a {@link RoutingAllocation}
*/
public static class Result {
private final boolean changed;
private final RoutingTable routingTable;
private final MetaData metaData;
private final RoutingExplanations explanations;
/**
* Creates a new {@link RoutingAllocation.Result} where no change to the routing table was made.
* @param clusterState the unchanged {@link ClusterState}
*/
public static Result unchanged(ClusterState clusterState) {
return new Result(false, clusterState.routingTable(), clusterState.metaData(), new RoutingExplanations());
}
/**
* Creates a new {@link RoutingAllocation.Result} where changes were made to the routing table.
* @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
* @param explanations Explanation for the reroute actions
*/
public static Result changed(RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
return new Result(true, routingTable, metaData, explanations);
}
/**
* Creates a new {@link RoutingAllocation.Result}
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
* @param explanations Explanation for the reroute actions
*/
private Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
this.changed = changed;
this.routingTable = routingTable;
this.metaData = metaData;
this.explanations = explanations;
}
/** determine whether the actual {@link RoutingTable} has been changed
* @return <code>true</code> if the {@link RoutingTable} has been changed by allocation. Otherwise <code>false</code>
*/
public boolean changed() {
return this.changed;
}
/**
* Get the {@link MetaData} referenced by this result
* @return referenced {@link MetaData}
*/
public MetaData metaData() {
return metaData;
}
/**
* Get the {@link RoutingTable} referenced by this result
* @return referenced {@link RoutingTable}
*/
public RoutingTable routingTable() {
return routingTable;
}
/**
* Get the explanation of this result
* @return explanation
*/
public RoutingExplanations explanations() {
return explanations;
}
}
private final AllocationDeciders deciders;
private final RoutingNodes routingNodes;

View File

@ -0,0 +1,205 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.Nullable;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
/**
* Represents the allocation decision by an allocator for an unassigned shard.
*/
public class UnassignedShardDecision {
/** a constant representing a shard decision where no decision was taken */
public static final UnassignedShardDecision DECISION_NOT_TAKEN =
new UnassignedShardDecision(null, null, null, null, null, null);
@Nullable
private final Decision finalDecision;
@Nullable
private final AllocationStatus allocationStatus;
@Nullable
private final String finalExplanation;
@Nullable
private final String assignedNodeId;
@Nullable
private final String allocationId;
@Nullable
private final Map<String, Decision> nodeDecisions;
private UnassignedShardDecision(Decision finalDecision,
AllocationStatus allocationStatus,
String finalExplanation,
String assignedNodeId,
String allocationId,
Map<String, Decision> nodeDecisions) {
assert finalExplanation != null || finalDecision == null :
"if a decision was taken, there must be an explanation for it";
assert assignedNodeId != null || finalDecision == null || finalDecision.type() != Type.YES :
"a yes decision must have a node to assign the shard to";
assert allocationStatus != null || finalDecision == null || finalDecision.type() == Type.YES :
"only a yes decision should not have an allocation status";
assert allocationId == null || assignedNodeId != null :
"allocation id can only be null if the assigned node is null";
this.finalDecision = finalDecision;
this.allocationStatus = allocationStatus;
this.finalExplanation = finalExplanation;
this.assignedNodeId = assignedNodeId;
this.allocationId = allocationId;
this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
}
/**
* Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision.
*/
public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, String explanation) {
return noDecision(allocationStatus, explanation, null);
}
/**
* Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision,
* as well as the individual node-level decisions that comprised the final NO decision.
*/
public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus,
String explanation,
@Nullable Map<String, Decision> nodeDecisions) {
Objects.requireNonNull(explanation, "explanation must not be null");
Objects.requireNonNull(allocationStatus, "allocationStatus must not be null");
return new UnassignedShardDecision(Decision.NO, allocationStatus, explanation, null, null, nodeDecisions);
}
/**
* Creates a THROTTLE decision with the given explanation and individual node-level decisions that
* comprised the final THROTTLE decision.
*/
public static UnassignedShardDecision throttleDecision(String explanation,
Map<String, Decision> nodeDecisions) {
Objects.requireNonNull(explanation, "explanation must not be null");
return new UnassignedShardDecision(Decision.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null,
nodeDecisions);
}
/**
* Creates a YES decision with the given explanation and individual node-level decisions that
* comprised the final YES decision, along with the node id to which the shard is assigned and
* the allocation id for the shard, if available.
*/
public static UnassignedShardDecision yesDecision(String explanation,
String assignedNodeId,
@Nullable String allocationId,
Map<String, Decision> nodeDecisions) {
Objects.requireNonNull(explanation, "explanation must not be null");
Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null");
return new UnassignedShardDecision(Decision.YES, null, explanation, assignedNodeId, allocationId, nodeDecisions);
}
/**
* Returns {@code true} if a decision was taken by the allocator, {@code false} otherwise.
* If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
*/
public boolean isDecisionTaken() {
return finalDecision != null;
}
/**
* Returns the final decision made by the allocator on whether to assign the unassigned shard.
* This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}.
*/
@Nullable
public Decision getFinalDecision() {
return finalDecision;
}
/**
* Returns the final decision made by the allocator on whether to assign the unassigned shard.
* Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
* throw an {@code IllegalArgumentException}.
*/
public Decision getFinalDecisionSafe() {
if (isDecisionTaken() == false) {
throw new IllegalArgumentException("decision must have been taken in order to return the final decision");
}
return finalDecision;
}
/**
* Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if
* no decision was taken or if the decision was {@link Decision.Type#YES}.
*/
@Nullable
public AllocationStatus getAllocationStatus() {
return allocationStatus;
}
/**
* Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
*/
@Nullable
public String getFinalExplanation() {
return finalExplanation;
}
/**
* Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
* Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
* throw an {@code IllegalArgumentException}.
*/
public String getFinalExplanationSafe() {
if (isDecisionTaken() == false) {
throw new IllegalArgumentException("decision must have been taken in order to return the final explanation");
}
return finalExplanation;
}
/**
* Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecision()} returns
* a value other than {@link Decision.Type#YES}, in which case this returns {@code null}.
*/
@Nullable
public String getAssignedNodeId() {
return assignedNodeId;
}
/**
* Gets the allocation id for the existing shard copy that the allocator is assigning the shard to.
* This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value
* and the node on which the shard is assigned already has a shard copy with an in-sync allocation id
* that we can re-use.
*/
@Nullable
public String getAllocationId() {
return allocationId;
}
/**
* Gets the individual node-level decisions that went into making the final decision as represented by
* {@link #getFinalDecision()}. The map that is returned has the node id as the key and a {@link Decision}
* as the decision for the given node.
*/
@Nullable
public Map<String, Decision> getNodeDecisions() {
return nodeDecisions;
}
}
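A minimal usage sketch for the new class, built only from the factory methods and getters defined above. AllocationStatus.DECIDERS_NO is an assumption here; the diff itself only shows DECIDERS_THROTTLED:
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;

class UnassignedShardDecisionSketch {
    UnassignedShardDecision explainNo() {
        // DECIDERS_NO is assumed; any non-null AllocationStatus would do.
        UnassignedShardDecision decision =
            UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, "no eligible data node");
        assert decision.isDecisionTaken();
        assert decision.getAssignedNodeId() == null; // only YES decisions carry a node id
        return decision;
    }
}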

View File

@ -74,7 +74,7 @@ public class AllocationDeciders extends AllocationDecider {
// short track if a NO is returned.
if (decision == Decision.NO) {
if (logger.isTraceEnabled()) {
logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName());
}
// short circuit only if debugging is not enabled
if (!allocation.debugDecision()) {

View File

@ -49,7 +49,7 @@ import org.elasticsearch.common.settings.Settings;
* To enable allocation awareness in this example nodes should contain a value
* for the <tt>rack_id</tt> key like:
* <pre>
* node.rack_id:1
* node.attr.rack_id:1
* </pre>
* <p>
* Awareness can also be used to prevent over-allocation in the case of node or

View File

@ -19,8 +19,15 @@
package org.elasticsearch.common.geo;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
import java.util.Arrays;
import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
@ -88,6 +95,24 @@ public final class GeoPoint {
return this;
}
// todo this is a crutch because LatLonPoint doesn't have a helper for returning .stringValue()
// todo remove with next release of lucene
public GeoPoint resetFromIndexableField(IndexableField field) {
if (field instanceof LatLonPoint) {
BytesRef br = field.binaryValue();
byte[] bytes = Arrays.copyOfRange(br.bytes, br.offset, br.length);
return this.reset(
GeoEncodingUtils.decodeLatitude(bytes, 0),
GeoEncodingUtils.decodeLongitude(bytes, Integer.BYTES));
} else if (field instanceof LatLonDocValuesField) {
long encoded = (long)(field.numericValue());
return this.reset(
GeoEncodingUtils.decodeLatitude((int)(encoded >>> 32)),
GeoEncodingUtils.decodeLongitude((int)encoded));
}
return resetFromIndexHash(Long.parseLong(field.stringValue()));
}
public GeoPoint resetFromGeoHash(String geohash) {
final long hash = mortonEncode(geohash);
return this.reset(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));
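The LatLonDocValuesField branch above unpacks a single long that stores latitude in the upper 32 bits and longitude in the lower 32. A JDK-only sketch of that packing scheme, where the encoded ints stand in for Lucene's encoded degrees:
final class LatLonPackingSketch {
    // Pack two 32-bit encoded coordinates into one long: latitude high, longitude low.
    static long pack(int encodedLat, int encodedLon) {
        return (((long) encodedLat) << 32) | (encodedLon & 0xFFFFFFFFL);
    }

    static int lat(long packed) {
        return (int) (packed >>> 32); // mirrors decodeLatitude((int)(encoded >>> 32))
    }

    static int lon(long packed) {
        return (int) packed; // mirrors decodeLongitude((int) encoded)
    }
}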

View File

@ -22,17 +22,18 @@ package org.elasticsearch.common.logging;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.MessageFactory;
import org.apache.logging.log4j.spi.ExtendedLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import java.util.Locale;
import java.util.function.Function;
/**
* Factory to get {@link Logger}s
*/
public abstract class ESLoggerFactory {
public final class ESLoggerFactory {
private ESLoggerFactory() {
}
public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope);
@ -41,40 +42,27 @@ public abstract class ESLoggerFactory {
Property.Dynamic, Property.NodeScope);
public static Logger getLogger(String prefix, String name) {
name = name.intern();
final Logger logger = getLogger(new PrefixMessageFactory(), name);
final MessageFactory factory = logger.getMessageFactory();
// in some cases, we initialize the logger before we are ready to set the prefix
// we can not re-initialize the logger, so the above getLogger might return an existing
// instance without the prefix set; thus, we hack around this by resetting the prefix
if (prefix != null && factory instanceof PrefixMessageFactory) {
((PrefixMessageFactory) factory).setPrefix(prefix.intern());
}
return logger;
return getLogger(prefix, LogManager.getLogger(name));
}
public static Logger getLogger(MessageFactory messageFactory, String name) {
return LogManager.getLogger(name, messageFactory);
public static Logger getLogger(String prefix, Class<?> clazz) {
return getLogger(prefix, LogManager.getLogger(clazz));
}
public static Logger getLogger(String prefix, Logger logger) {
return new PrefixLogger((ExtendedLogger)logger, logger.getName(), prefix);
}
public static Logger getLogger(Class<?> clazz) {
return getLogger(null, clazz);
}
public static Logger getLogger(String name) {
return getLogger((String)null, name);
}
public static DeprecationLogger getDeprecationLogger(String name) {
return new DeprecationLogger(getLogger(name));
}
public static DeprecationLogger getDeprecationLogger(String prefix, String name) {
return new DeprecationLogger(getLogger(prefix, name));
return getLogger(null, name);
}
public static Logger getRootLogger() {
return LogManager.getRootLogger();
}
private ESLoggerFactory() {
// Utility class can't be built.
}
}
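A minimal usage sketch of the reworked factory (compiles only against the Elasticsearch source at this commit). The prefix is now carried by a PrefixLogger wrapper rather than a custom MessageFactory, and the bracketed node name below is an illustrative prefix, not a required format:
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.ESLoggerFactory;

class LoggerUsageSketch {
    // Every message logged through this instance carries the given prefix.
    private final Logger logger = ESLoggerFactory.getLogger("[node-1]", LoggerUsageSketch.class);

    void logSomething() {
        logger.info("prefixed message");
    }
}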

View File

@ -30,7 +30,8 @@ import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory;
import org.elasticsearch.Version;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
@ -44,7 +45,6 @@ import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
@ -52,7 +52,7 @@ import java.util.Set;
public class LogConfigurator {
public static void configure(final Environment environment, final boolean resolveConfig) throws IOException {
public static void configure(final Environment environment, final boolean resolveConfig) throws IOException, UserException {
final Settings settings = environment.settings();
setLogConfigurationSystemProperty(environment, settings);
@ -77,17 +77,25 @@ public class LogConfigurator {
return FileVisitResult.CONTINUE;
}
});
if (configurations.isEmpty()) {
throw new UserException(
ExitCodes.CONFIG,
"no log4j2.properties found; tried [" + environment.configFile() + "] and its subdirectories");
}
context.start(new CompositeConfiguration(configurations));
}
if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings);
Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
}
final Map<String, String> levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap();
for (String key : levels.keySet()) {
final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings);
Loggers.setLevel(Loggers.getLogger(key.substring("logger.".length())), level);
Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level);
}
}

View File

@ -35,10 +35,12 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.Node;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import static java.util.Arrays.asList;
import static javax.security.auth.login.Configuration.getConfiguration;
import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
/**
@ -46,24 +48,8 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
*/
public class Loggers {
static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
public static final String SPACE = " ";
private static boolean consoleLoggingEnabled = true;
public static void disableConsoleLogging() {
consoleLoggingEnabled = false;
}
public static void enableConsoleLogging() {
consoleLoggingEnabled = true;
}
public static boolean consoleLoggingEnabled() {
return consoleLoggingEnabled;
}
public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
@ -82,10 +68,16 @@ public class Loggers {
}
public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), settings, prefixes);
final List<String> prefixesList = prefixesList(settings, prefixes);
return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
}
public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
final List<String> prefixesList = prefixesList(settings, prefixes);
return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
}
private static List<String> prefixesList(Settings settings, String... prefixes) {
List<String> prefixesList = new ArrayList<>();
if (Node.NODE_NAME_SETTING.exists(settings)) {
prefixesList.add(Node.NODE_NAME_SETTING.get(settings));
@ -93,26 +85,31 @@ public class Loggers {
if (prefixes != null && prefixes.length > 0) {
prefixesList.addAll(asList(prefixes));
}
return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()]));
return prefixesList;
}
public static Logger getLogger(Logger parentLogger, String s) {
return ESLoggerFactory.getLogger(parentLogger.<MessageFactory>getMessageFactory(), getLoggerName(parentLogger.getName() + s));
assert parentLogger instanceof PrefixLogger;
return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
}
public static Logger getLogger(String s) {
return ESLoggerFactory.getLogger(getLoggerName(s));
return ESLoggerFactory.getLogger(s);
}
public static Logger getLogger(Class<?> clazz) {
return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
return ESLoggerFactory.getLogger(clazz);
}
public static Logger getLogger(Class<?> clazz, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), prefixes);
return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
}
public static Logger getLogger(String name, String... prefixes) {
return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
}
private static String formatPrefix(String... prefixes) {
String prefix = null;
if (prefixes != null && prefixes.length > 0) {
StringBuilder sb = new StringBuilder();
@ -130,7 +127,7 @@ public class Loggers {
prefix = sb.toString();
}
}
return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
return prefix;
}
/**
@ -148,30 +145,23 @@ public class Loggers {
}
public static void setLevel(Logger logger, Level level) {
if (!"".equals(logger.getName())) {
if (!LogManager.ROOT_LOGGER_NAME.equals(logger.getName())) {
Configurator.setLevel(logger.getName(), level);
} else {
LoggerContext ctx = LoggerContext.getContext(false);
Configuration config = ctx.getConfiguration();
LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
final LoggerContext ctx = LoggerContext.getContext(false);
final Configuration config = ctx.getConfiguration();
final LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
loggerConfig.setLevel(level);
ctx.updateLoggers();
}
}
private static String buildClassLoggerName(Class<?> clazz) {
String name = clazz.getName();
if (name.startsWith("org.elasticsearch.")) {
name = Classes.getPackageName(clazz);
// we have to descend the hierarchy
final LoggerContext ctx = LoggerContext.getContext(false);
for (final LoggerConfig loggerConfig : ctx.getConfiguration().getLoggers().values()) {
if (LogManager.ROOT_LOGGER_NAME.equals(logger.getName()) || loggerConfig.getName().startsWith(logger.getName() + ".")) {
Configurator.setLevel(loggerConfig.getName(), level);
}
}
return name;
}
private static String getLoggerName(String name) {
if (name.startsWith("org.elasticsearch.")) {
name = name.substring("org.elasticsearch.".length());
}
return commonPrefix + name;
}
public static void addAppender(final Logger logger, final Appender appender) {
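A short sketch of the new behavior (against the Elasticsearch source at this commit): when the target is the root logger, setLevel now also descends into every configured child logger, so a single call adjusts the whole hierarchy:
import org.apache.logging.log4j.Level;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;

class SetLevelSketch {
    static void quietEverything() {
        // Updates the root level and every configured logger beneath it.
        Loggers.setLevel(ESLoggerFactory.getRootLogger(), Level.WARN);
    }
}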

View File

@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;
import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.spi.ExtendedLogger;
import org.apache.logging.log4j.spi.ExtendedLoggerWrapper;
import java.lang.ref.WeakReference;
import java.util.WeakHashMap;
class PrefixLogger extends ExtendedLoggerWrapper {
// we can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds
// a permanent reference to the marker; however, we have transient markers from index-level and
// shard-level components so this would effectively be a memory leak
private static final WeakHashMap<String, WeakReference<Marker>> markers = new WeakHashMap<>();
private final Marker marker;
public String prefix() {
return marker.getName();
}
PrefixLogger(final ExtendedLogger logger, final String name, final String prefix) {
super(logger, name, null);
final String actualPrefix = (prefix == null ? "" : prefix).intern();
final Marker actualMarker;
// markers is not thread-safe, so we synchronize access
synchronized (markers) {
final WeakReference<Marker> marker = markers.get(actualPrefix);
final Marker maybeMarker = marker == null ? null : marker.get();
if (maybeMarker == null) {
actualMarker = new MarkerManager.Log4jMarker(actualPrefix);
markers.put(actualPrefix, new WeakReference<>(actualMarker));
} else {
actualMarker = maybeMarker;
}
}
this.marker = actualMarker;
}
@Override
public void logMessage(final String fqcn, final Level level, final Marker marker, final Message message, final Throwable t) {
assert marker == null;
super.logMessage(fqcn, level, this.marker, message, t);
}
}
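A JDK-only sketch of the caching idiom above: both the key and the cached value are weakly held, so an entry disappears once no live logger references its prefix, which is exactly the property the comment about transient index-level and shard-level markers relies on:
import java.lang.ref.WeakReference;
import java.util.WeakHashMap;
import java.util.function.Function;

final class WeakValueCache<K, V> {
    // WeakHashMap alone only weakens keys; wrapping values in WeakReference
    // keeps the cache from pinning them, at the cost of an occasional rebuild.
    private final WeakHashMap<K, WeakReference<V>> cache = new WeakHashMap<>();

    synchronized V computeIfAbsent(K key, Function<K, V> factory) {
        WeakReference<V> ref = cache.get(key);
        V value = ref == null ? null : ref.get();
        if (value == null) {
            value = factory.apply(key);
            cache.put(key, new WeakReference<>(value));
        }
        return value;
    }
}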

View File

@ -1,221 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.message.MessageFactory2;
import org.apache.logging.log4j.message.ObjectMessage;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.message.SimpleMessage;
public class PrefixMessageFactory implements MessageFactory2 {
private String prefix = "";
public String getPrefix() {
return prefix;
}
public void setPrefix(String prefix) {
this.prefix = prefix;
}
@Override
public Message newMessage(Object message) {
return new PrefixObjectMessage(prefix, message);
}
private static class PrefixObjectMessage extends ObjectMessage {
private final String prefix;
private final Object object;
private String prefixObjectString;
private PrefixObjectMessage(String prefix, Object object) {
super(object);
this.prefix = prefix;
this.object = object;
}
@Override
public String getFormattedMessage() {
if (prefixObjectString == null) {
prefixObjectString = prefix + super.getFormattedMessage();
}
return prefixObjectString;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
@Override
public Object[] getParameters() {
return new Object[]{prefix, object};
}
}
@Override
public Message newMessage(String message) {
return new PrefixSimpleMessage(prefix, message);
}
private static class PrefixSimpleMessage extends SimpleMessage {
private final String prefix;
private String prefixMessage;
PrefixSimpleMessage(String prefix, String message) {
super(message);
this.prefix = prefix;
}
PrefixSimpleMessage(String prefix, CharSequence charSequence) {
super(charSequence);
this.prefix = prefix;
}
@Override
public String getFormattedMessage() {
if (prefixMessage == null) {
prefixMessage = prefix + super.getFormattedMessage();
}
return prefixMessage;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
@Override
public int length() {
return prefixMessage.length();
}
@Override
public char charAt(int index) {
return prefixMessage.charAt(index);
}
@Override
public CharSequence subSequence(int start, int end) {
return prefixMessage.subSequence(start, end);
}
}
@Override
public Message newMessage(String message, Object... params) {
return new PrefixParameterizedMessage(prefix, message, params);
}
private static class PrefixParameterizedMessage extends ParameterizedMessage {
private static ThreadLocal<StringBuilder> threadLocalStringBuilder = ThreadLocal.withInitial(StringBuilder::new);
private final String prefix;
private String formattedMessage;
private PrefixParameterizedMessage(String prefix, String messagePattern, Object... arguments) {
super(messagePattern, arguments);
this.prefix = prefix;
}
@Override
public String getFormattedMessage() {
if (formattedMessage == null) {
final StringBuilder buffer = threadLocalStringBuilder.get();
buffer.setLength(0);
formatTo(buffer);
formattedMessage = buffer.toString();
}
return formattedMessage;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
}
@Override
public Message newMessage(CharSequence charSequence) {
return new PrefixSimpleMessage(prefix, charSequence);
}
@Override
public Message newMessage(String message, Object p0) {
return new PrefixParameterizedMessage(prefix, message, p0);
}
@Override
public Message newMessage(String message, Object p0, Object p1) {
return new PrefixParameterizedMessage(prefix, message, p0, p1);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7);
}
@Override
public Message newMessage(
String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8);
}
@Override
public Message newMessage(
String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8, Object p9) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
}
}

View File

@ -25,19 +25,12 @@ import org.elasticsearch.common.logging.Loggers;
/** An InfoStream (for Lucene's IndexWriter) that redirects
* messages to "lucene.iw.ifd" and "lucene.iw" Logger.trace. */
public final class LoggerInfoStream extends InfoStream {
/** Used for component-specific logging: */
/** Logger for everything */
private final Logger logger;
private final Logger parentLogger;
/** Logger for IndexFileDeleter */
private final Logger ifdLogger;
public LoggerInfoStream(Logger parentLogger) {
logger = Loggers.getLogger(parentLogger, ".lucene.iw");
ifdLogger = Loggers.getLogger(parentLogger, ".lucene.iw.ifd");
public LoggerInfoStream(final Logger parentLogger) {
this.parentLogger = parentLogger;
}
@Override
@ -53,14 +46,11 @@ public final class LoggerInfoStream extends InfoStream {
}
private Logger getLogger(String component) {
if (component.equals("IFD")) {
return ifdLogger;
} else {
return logger;
}
return Loggers.getLogger(parentLogger, "." + component);
}
@Override
public void close() {
}
}

View File

@ -42,11 +42,15 @@ import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.tasks.RawTaskStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
/**
* A module to handle registering and binding all network related classes.
@ -54,7 +58,6 @@ import java.util.List;
public class NetworkModule extends AbstractModule {
public static final String TRANSPORT_TYPE_KEY = "transport.type";
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String LOCAL_TRANSPORT = "local";
public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";
@ -65,8 +68,6 @@ public class NetworkModule extends AbstractModule {
public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
Setting.simpleString(TRANSPORT_SERVICE_TYPE_KEY, Property.NodeScope);
public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);
private final NetworkService networkService;
@ -74,10 +75,10 @@ public class NetworkModule extends AbstractModule {
private final boolean transportClient;
private final AllocationCommandRegistry allocationCommandRegistry = new AllocationCommandRegistry();
private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
private final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();
private final List<TransportInterceptor> transportInterceptors = new ArrayList<>();
/**
* Creates a network module that custom networking classes can be plugged into.
@ -89,7 +90,6 @@ public class NetworkModule extends AbstractModule {
this.networkService = networkService;
this.settings = settings;
this.transportClient = transportClient;
registerTransportService("default", TransportService.class);
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new));
namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new));
@ -100,11 +100,6 @@ public class NetworkModule extends AbstractModule {
return transportClient;
}
/** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */
public void registerTransportService(String name, Class<? extends TransportService> clazz) {
transportServiceTypes.registerExtension(name, clazz);
}
/** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
public void registerTransport(String name, Class<? extends Transport> clazz) {
transportTypes.registerExtension(name, clazz);
@ -149,9 +144,9 @@ public class NetworkModule extends AbstractModule {
@Override
protected void configure() {
bind(NetworkService.class).toInstance(networkService);
transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, "default");
bindTransportService();
transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
bind(TransportInterceptor.class).toInstance(new CompositeTransportInterceptor(this.transportInterceptors));
if (transportClient == false) {
if (HTTP_ENABLED.get(settings)) {
bind(HttpServer.class).asEagerSingleton();
@ -181,4 +176,39 @@ public class NetworkModule extends AbstractModule {
public boolean canRegisterHttpExtensions() {
return transportClient == false;
}
/**
* Registers a new {@link TransportInterceptor}
*/
public void addTransportInterceptor(TransportInterceptor interceptor) {
this.transportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null"));
}
static final class CompositeTransportInterceptor implements TransportInterceptor {
final List<TransportInterceptor> transportInterceptors;
private CompositeTransportInterceptor(List<TransportInterceptor> transportInterceptors) {
this.transportInterceptors = new ArrayList<>(transportInterceptors);
}
@Override
public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, TransportRequestHandler<T> actualHandler) {
for (TransportInterceptor interceptor : this.transportInterceptors) {
actualHandler = interceptor.interceptHandler(action, actualHandler);
}
return actualHandler;
}
@Override
public AsyncSender interceptSender(AsyncSender sender) {
for (TransportInterceptor interceptor : this.transportInterceptors) {
sender = interceptor.interceptSender(sender);
}
return sender;
}
}
protected void bindTransportService() {
bind(TransportService.class).asEagerSingleton();
}
}
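A self-contained sketch of the wrapping loop in CompositeTransportInterceptor: each interceptor wraps the previous result, so the interceptor registered last ends up outermost. The String wrappers here are stand-ins for request handlers and senders:
import java.util.ArrayList;
import java.util.List;
import java.util.function.UnaryOperator;

final class InterceptorChainSketch {
    static <T> T compose(T seed, List<UnaryOperator<T>> interceptors) {
        T current = seed;
        for (UnaryOperator<T> interceptor : interceptors) {
            current = interceptor.apply(current); // later entries wrap earlier ones
        }
        return current;
    }

    public static void main(String[] args) {
        List<UnaryOperator<String>> wrappers = new ArrayList<>();
        wrappers.add(s -> "a(" + s + ")");
        wrappers.add(s -> "b(" + s + ")");
        System.out.println(compose("handler", wrappers)); // prints b(a(handler))
    }
}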

View File

@ -32,6 +32,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
/**
* Utilities for network interfaces / addresses binding and publishing.
@ -227,14 +228,15 @@ public abstract class NetworkUtils {
/** Returns addresses for the given interface (it must be marked up) */
static InetAddress[] getAddressesForInterface(String name) throws SocketException {
NetworkInterface intf = NetworkInterface.getByName(name);
if (intf == null) {
Optional<NetworkInterface> networkInterface = getInterfaces().stream().filter((netIf) -> name.equals(netIf.getName())).findFirst();
if (networkInterface.isPresent() == false) {
throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces());
}
if (!intf.isUp()) {
if (!networkInterface.get().isUp()) {
throw new IllegalArgumentException("Interface '" + name + "' is not up and running");
}
List<InetAddress> list = Collections.list(intf.getInetAddresses());
List<InetAddress> list = Collections.list(networkInterface.get().getInetAddresses());
if (list.isEmpty()) {
throw new IllegalArgumentException("Interface '" + name + "' has no internet addresses");
}
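A JDK-only sketch of the lookup change above: the interface is resolved from the same enumerated list that the error message prints, instead of via NetworkInterface.getByName, so the reported set always matches what was actually searched:
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

final class InterfaceLookupSketch {
    static NetworkInterface findUp(String name) throws SocketException {
        List<NetworkInterface> interfaces = Collections.list(NetworkInterface.getNetworkInterfaces());
        Optional<NetworkInterface> match =
            interfaces.stream().filter(netIf -> name.equals(netIf.getName())).findFirst();
        if (match.isPresent() == false) {
            throw new IllegalArgumentException("No interface named '" + name + "' found, got " + interfaces);
        }
        if (!match.get().isUp()) {
            throw new IllegalArgumentException("Interface '" + name + "' is not up and running");
        }
        return match.get();
    }
}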

View File

@ -54,8 +54,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.env.Environment;
@ -226,7 +226,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
NetworkModule.HTTP_DEFAULT_TYPE_SETTING,
NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING,
NetworkModule.HTTP_TYPE_SETTING,
NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
NetworkModule.TRANSPORT_TYPE_SETTING,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,

View File

@ -19,7 +19,6 @@
package org.elasticsearch.discovery;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Setting;
@ -27,8 +26,8 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;

View File

@ -31,7 +31,6 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@ -167,11 +166,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build();
RoutingAllocation.Result result = master.allocationService.reroute(currentState, "node_add");
if (result.changed()) {
currentState = ClusterState.builder(currentState).routingResult(result).build();
}
return currentState;
return master.allocationService.reroute(currentState, "node_add");
}
@Override
@ -234,9 +229,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
}
// reroute here, so we eagerly remove dead nodes from the routing
ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
RoutingAllocation.Result routingResult = master.allocationService.deassociateDeadNodes(
ClusterState.builder(updatedState).build(), true, "node stopped");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
return master.allocationService.deassociateDeadNodes(updatedState, true, "node stopped");
}
@Override

View File

@ -17,11 +17,10 @@
* under the License.
*/
package org.elasticsearch.discovery.zen.elect;
package org.elasticsearch.discovery.zen;
import com.carrotsearch.hppc.ObjectContainer;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
@ -33,9 +32,11 @@ import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
/**
*
@ -45,17 +46,64 @@ public class ElectMasterService extends AbstractComponent {
public static final Setting<Integer> DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING =
Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope);
// This is the minimum version a master needs to be on, otherwise it gets ignored
// It is derived from the minimum compatibility version of this node's current version
private final Version minMasterVersion;
private final NodeComparator nodeComparator = new NodeComparator();
private volatile int minimumMasterNodes;
/**
* a class to encapsulate all the information about a candidate in a master election
* that is needed to decided which of the candidates should win
*/
public static class MasterCandidate {
public static final long UNRECOVERED_CLUSTER_VERSION = -1;
final DiscoveryNode node;
final long clusterStateVersion;
public MasterCandidate(DiscoveryNode node, long clusterStateVersion) {
Objects.requireNonNull(node);
assert clusterStateVersion >= -1 : "got: " + clusterStateVersion;
assert node.isMasterNode();
this.node = node;
this.clusterStateVersion = clusterStateVersion;
}
public DiscoveryNode getNode() {
return node;
}
public long getClusterStateVersion() {
return clusterStateVersion;
}
@Override
public String toString() {
return "Candidate{" +
"node=" + node +
", clusterStateVersion=" + clusterStateVersion +
'}';
}
/**
* compares two candidates to indicate which is the better master.
* A higher cluster state version is better.
*
* @return -1 if c1 is the better candidate, 1 if c2 is.
*/
public static int compare(MasterCandidate c1, MasterCandidate c2) {
// we explicitly swap c1 and c2 here. the code expects "better" is lower in a sorted
// list, so if c2 has a higher cluster state version, it needs to come first.
int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion);
if (ret == 0) {
ret = compareNodes(c1.getNode(), c2.getNode());
}
return ret;
}
}
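A quick illustration of the ordering contract (a sketch; node1 and node2 stand in for master-eligible DiscoveryNode instances): sorting with MasterCandidate::compare moves the candidate with the newest cluster state to the front.

List<ElectMasterService.MasterCandidate> candidates = new ArrayList<>(Arrays.asList(
        new ElectMasterService.MasterCandidate(node1, 7),   // older cluster state
        new ElectMasterService.MasterCandidate(node2, 9))); // newer cluster state
candidates.sort(ElectMasterService.MasterCandidate::compare);
assert candidates.get(0).getClusterStateVersion() == 9; // "better" sorts first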
@Inject
public ElectMasterService(Settings settings) {
super(settings);
this.minMasterVersion = Version.CURRENT.minimumCompatibilityVersion();
this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
}
@ -69,16 +117,41 @@ public class ElectMasterService extends AbstractComponent {
}
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
if (minimumMasterNodes < 1) {
return true;
}
int count = 0;
for (DiscoveryNode node : nodes) {
if (node.isMasterNode()) {
count++;
}
}
return count >= minimumMasterNodes;
return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes);
}
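Distilled into a standalone predicate (a hypothetical helper, not ES API), the new one-liner keeps the quorum semantics but no longer accepts an empty node set:

static boolean hasEnough(int masterNodeCount, int minimumMasterNodes) {
    return masterNodeCount > 0 && (minimumMasterNodes < 0 || masterNodeCount >= minimumMasterNodes);
}
// hasEnough(0, -1) == false  (changed: zero master nodes no longer passes)
// hasEnough(1, -1) == true   (no quorum configured, any non-empty set is enough)
// hasEnough(1,  2) == false  (below the configured quorum)
// hasEnough(2,  2) == true   (quorum met)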
public boolean hasEnoughCandidates(Collection<MasterCandidate> candidates) {
if (candidates.isEmpty()) {
return false;
}
if (minimumMasterNodes < 1) {
return true;
}
assert candidates.stream().map(MasterCandidate::getNode).collect(Collectors.toSet()).size() == candidates.size() :
"duplicates ahead: " + candidates;
return candidates.size() >= minimumMasterNodes;
}
/**
* Elects a new master out of the possible candidates, returning it. Expects that the
* candidates have already passed {@code hasEnoughCandidates}; never returns <tt>null</tt>.
*/
public MasterCandidate electMaster(Collection<MasterCandidate> candidates) {
assert hasEnoughCandidates(candidates);
List<MasterCandidate> sortedCandidates = new ArrayList<>(candidates);
sortedCandidates.sort(MasterCandidate::compare);
return sortedCandidates.get(0);
}
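Taken together, the intended call sequence is guard-then-elect; electMaster asserts that the quorum check already passed. A sketch (candidates assumed built from ping responses):

if (electMaster.hasEnoughCandidates(candidates)) {
    ElectMasterService.MasterCandidate winner = electMaster.electMaster(candidates);
    DiscoveryNode master = winner.getNode(); // the node every candidate should join
}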
/** selects the best active master to join, where multiple are discovered */
public DiscoveryNode tieBreakActiveMasters(Collection<DiscoveryNode> activeMasters) {
return activeMasters.stream().min(ElectMasterService::compareNodes).get();
}
public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {
@ -107,7 +180,7 @@ public class ElectMasterService extends AbstractComponent {
*/
public List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {
ArrayList<DiscoveryNode> sortedNodes = CollectionUtils.iterableAsArrayList(nodes);
CollectionUtil.introSort(sortedNodes, nodeComparator);
CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes);
return sortedNodes;
}
@ -130,25 +203,6 @@ public class ElectMasterService extends AbstractComponent {
return nextPossibleMasters.toArray(new DiscoveryNode[nextPossibleMasters.size()]);
}
/**
* Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
* if no master has been elected.
*/
public DiscoveryNode electMaster(Iterable<DiscoveryNode> nodes) {
List<DiscoveryNode> sortedNodes = sortedMasterNodes(nodes);
if (sortedNodes == null || sortedNodes.isEmpty()) {
return null;
}
DiscoveryNode masterNode = sortedNodes.get(0);
// Sanity check: maybe we don't end up here, because serialization may have failed.
if (masterNode.getVersion().before(minMasterVersion)) {
logger.warn("ignoring master [{}], because the version [{}] is lower than the minimum compatible version [{}]", masterNode, masterNode.getVersion(), minMasterVersion);
return null;
} else {
return masterNode;
}
}
private List<DiscoveryNode> sortedMasterNodes(Iterable<DiscoveryNode> nodes) {
List<DiscoveryNode> possibleNodes = CollectionUtils.iterableAsArrayList(nodes);
if (possibleNodes.isEmpty()) {
@ -161,21 +215,18 @@ public class ElectMasterService extends AbstractComponent {
it.remove();
}
}
CollectionUtil.introSort(possibleNodes, nodeComparator);
CollectionUtil.introSort(possibleNodes, ElectMasterService::compareNodes);
return possibleNodes;
}
private static class NodeComparator implements Comparator<DiscoveryNode> {
@Override
public int compare(DiscoveryNode o1, DiscoveryNode o2) {
if (o1.isMasterNode() && !o2.isMasterNode()) {
return -1;
}
if (!o1.isMasterNode() && o2.isMasterNode()) {
return 1;
}
return o1.getId().compareTo(o2.getId());
/** master nodes go before other nodes, with a secondary sort by id **/
private static int compareNodes(DiscoveryNode o1, DiscoveryNode o2) {
if (o1.isMasterNode() && !o2.isMasterNode()) {
return -1;
}
if (!o1.isMasterNode() && o2.isMasterNode()) {
return 1;
}
return o1.getId().compareTo(o2.getId());
}
}

View File

@ -33,7 +33,6 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
@ -41,7 +40,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import java.util.ArrayList;
@ -457,17 +455,12 @@ public class NodeJoinController extends AbstractComponent {
if (nodesChanged) {
newState.nodes(nodesBuilder);
final ClusterState tmpState = newState.build();
RoutingAllocation.Result result = allocationService.reroute(tmpState, "node_join");
newState = ClusterState.builder(tmpState);
if (result.changed()) {
newState.routingResult(result);
}
return results.build(allocationService.reroute(newState.build(), "node_join"));
} else {
// we must return a new cluster state instance to force publishing. This is important
// for the joining node to finalize its join and set us as a master
return results.build(newState.build());
}
// we must return a new cluster state instance to force publishing. This is important
// for the joining node to finalize its join and set us as a master
return results.build(newState.build());
}
private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List<DiscoveryNode> joiningNodes) {
@ -487,9 +480,8 @@ public class NodeJoinController extends AbstractComponent {
// now trim any left over dead nodes - either left there when the previous master stepped down
// or removed by us above
ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build();
RoutingAllocation.Result result = allocationService.deassociateDeadNodes(tmpState, false,
"removed dead nodes on election");
return ClusterState.builder(tmpState).routingResult(result);
return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false,
"removed dead nodes on election"));
}
@Override

View File

@ -24,7 +24,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@ -39,7 +38,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@ -56,7 +54,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.MasterFaultDetection;
import org.elasticsearch.discovery.zen.fd.NodesFaultDetection;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
@ -76,13 +73,10 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
@ -146,9 +140,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private final JoinThreadControl joinThreadControl;
/** counts the times this node has joined the cluster or has elected itself as master */
private final AtomicLong clusterJoinsCounter = new AtomicLong();
// must initialized in doStart(), when we have the allocationService set
private volatile NodeJoinController nodeJoinController;
private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
@ -284,8 +275,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
protected void doClose() {
masterFD.close();
nodesFD.close();
publishClusterState.close();
membership.close();
pingService.close();
}
@ -306,8 +295,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}
@Override
public boolean nodeHasJoinedClusterOnce() {
return clusterJoinsCounter.get() > 0;
public ClusterState clusterState() {
return clusterService.state();
}
/** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@ -318,7 +307,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (!clusterChangedEvent.state().getNodes().isLocalNodeElectedMaster()) {
throw new IllegalStateException("Shouldn't publish state when not master");
}
nodesFD.updateNodesAndPing(clusterChangedEvent.state());
try {
publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener);
} catch (FailedToCommitClusterStateException t) {
@ -338,6 +327,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
});
throw t;
}
// update the set of nodes to ping after the new cluster state has been published
nodesFD.updateNodesAndPing(clusterChangedEvent.state());
}
/**
* Gets the current set of nodes involved in the node fault detection.
* NB: for testing purposes
*/
public Set<DiscoveryNode> getFaultDetectionNodes() {
return nodesFD.getNodes();
}
@Override
@ -397,8 +397,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
joinThreadControl.markThreadAsDone(currentThread);
// we only start nodesFD if we are master (it may be that we received a cluster state while pinging)
nodesFD.updateNodesAndPing(state); // start the nodes FD
long count = clusterJoinsCounter.incrementAndGet();
logger.trace("cluster joins counter set to [{}] (elected as master)", count);
}
@Override
@ -572,9 +570,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) {
return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes"));
} else {
final RoutingAllocation.Result routingResult =
allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks));
return resultBuilder.build(ClusterState.builder(remainingNodesClusterState).routingResult(routingResult).build());
return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)));
}
}
@ -755,9 +751,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) {
// it's a fresh update from the master as we transition from not having a master to having one
logger.debug("got first state from fresh master [{}]", newClusterState.nodes().getMasterNodeId());
long count = clusterJoinsCounter.incrementAndGet();
logger.trace("updated cluster join cluster to [{}]", count);
return newClusterState;
}
@ -864,16 +857,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
} else if (nodeJoinController == null) {
throw new IllegalStateException("discovery module is not yet started");
} else {
// The minimum supported version for a node joining a master:
Version minimumNodeJoinVersion = localNode().getVersion().minimumCompatibilityVersion();
// Sanity check: maybe we don't end up here, because serialization may have failed.
if (node.getVersion().before(minimumNodeJoinVersion)) {
callback.onFailure(
new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
);
return;
}
// try and connect to the node, if it fails, we can raise an exception back to the client...
transportService.connectToNode(node);
@ -892,14 +875,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private DiscoveryNode findMaster() {
logger.trace("starting to ping");
ZenPing.PingResponse[] fullPingResponses = pingService.pingAndWait(pingTimeout);
List<ZenPing.PingResponse> fullPingResponses = pingService.pingAndWait(pingTimeout).toList();
if (fullPingResponses == null) {
logger.trace("No full ping responses");
return null;
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder();
if (fullPingResponses.length == 0) {
if (fullPingResponses.size() == 0) {
sb.append(" {none}");
} else {
for (ZenPing.PingResponse pingResponse : fullPingResponses) {
@ -909,69 +892,57 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
logger.trace("full ping responses:{}", sb);
}
final DiscoveryNode localNode = clusterService.localNode();
// add our selves
assert fullPingResponses.stream().map(ZenPing.PingResponse::node)
.filter(n -> n.equals(localNode)).findAny().isPresent() == false;
fullPingResponses.add(new ZenPing.PingResponse(localNode, null, clusterService.state()));
// filter responses
final List<ZenPing.PingResponse> pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
final DiscoveryNode localNode = clusterService.localNode();
List<DiscoveryNode> pingMasters = new ArrayList<>();
List<DiscoveryNode> activeMasters = new ArrayList<>();
for (ZenPing.PingResponse pingResponse : pingResponses) {
if (pingResponse.master() != null) {
// We can't include the local node in the pingMasters list, otherwise we may end up electing ourselves without
// any check / verifications from other nodes in ZenDiscovery#innerJoinCluster()
if (!localNode.equals(pingResponse.master())) {
pingMasters.add(pingResponse.master());
}
// We can't include the local node in the activeMasters list, otherwise we may end up electing ourselves without
// any check / verifications from other nodes in ZenDiscovery#innerJoinCluster()
if (pingResponse.master() != null && !localNode.equals(pingResponse.master())) {
activeMasters.add(pingResponse.master());
}
}
// nodes discovered during pinging
Set<DiscoveryNode> activeNodes = new HashSet<>();
// nodes discovered that have previously been part of the cluster and are not pinging for the very first time
Set<DiscoveryNode> joinedOnceActiveNodes = new HashSet<>();
if (localNode.isMasterNode()) {
activeNodes.add(localNode);
long joinsCounter = clusterJoinsCounter.get();
if (joinsCounter > 0) {
logger.trace("adding local node to the list of active nodes that have previously joined the cluster (joins counter is [{}])", joinsCounter);
joinedOnceActiveNodes.add(localNode);
}
}
List<ElectMasterService.MasterCandidate> masterCandidates = new ArrayList<>();
for (ZenPing.PingResponse pingResponse : pingResponses) {
activeNodes.add(pingResponse.node());
if (pingResponse.hasJoinedOnce()) {
joinedOnceActiveNodes.add(pingResponse.node());
if (pingResponse.node().isMasterNode()) {
masterCandidates.add(new ElectMasterService.MasterCandidate(pingResponse.node(), pingResponse.getClusterStateVersion()));
}
}
if (pingMasters.isEmpty()) {
if (electMaster.hasEnoughMasterNodes(activeNodes)) {
// we give preference to nodes who have previously already joined the cluster. Those will
// have a cluster state in memory, including an up-to-date routing table (which is not persisted to disk
// by the gateway)
DiscoveryNode master = electMaster.electMaster(joinedOnceActiveNodes);
if (master != null) {
return master;
}
return electMaster.electMaster(activeNodes);
if (activeMasters.isEmpty()) {
if (electMaster.hasEnoughCandidates(masterCandidates)) {
final ElectMasterService.MasterCandidate winner = electMaster.electMaster(masterCandidates);
logger.trace("candidate {} won election", winner);
return winner.getNode();
} else {
// if we don't have enough master nodes, we bail, because there are not enough master to elect from
logger.trace("not enough master nodes [{}]", activeNodes);
logger.trace("not enough master nodes [{}]", masterCandidates);
return null;
}
} else {
assert !pingMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
assert !activeMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
// lets tie break between discovered nodes
return electMaster.electMaster(pingMasters);
return electMaster.tieBreakActiveMasters(activeMasters);
}
}
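Condensed, the new findMaster() decision reduces to two branches: with no active master in sight, run a full election over master-eligible candidates; otherwise never elect ourselves and tie-break among the advertised masters. A sketch (hypothetical method, inputs as built above):

DiscoveryNode chooseMaster(List<DiscoveryNode> activeMasters,
                           List<ElectMasterService.MasterCandidate> masterCandidates,
                           ElectMasterService electMaster) {
    if (activeMasters.isEmpty()) {
        if (electMaster.hasEnoughCandidates(masterCandidates)) {
            return electMaster.electMaster(masterCandidates).getNode(); // full election
        }
        return null; // not enough master-eligible nodes discovered
    }
    return electMaster.tieBreakActiveMasters(activeMasters); // join an existing master
}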
static List<ZenPing.PingResponse> filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) {
static List<ZenPing.PingResponse> filterPingResponses(List<ZenPing.PingResponse> fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) {
List<ZenPing.PingResponse> pingResponses;
if (masterElectionIgnoreNonMasters) {
pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
pingResponses = fullPingResponses.stream().filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
} else {
pingResponses = Arrays.asList(fullPingResponses);
pingResponses = fullPingResponses;
}
if (logger.isDebugEnabled()) {

View File

@ -168,7 +168,6 @@ public class MasterFaultDetection extends FaultDetection {
super.close();
stop("closing");
this.listeners.clear();
transportService.removeHandler(MASTER_PING_ACTION_NAME);
}
@Override

View File

@ -41,6 +41,8 @@ import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
@ -91,6 +93,14 @@ public class NodesFaultDetection extends FaultDetection {
listeners.remove(listener);
}
/**
* Gets the current set of nodes involved in node fault detection.
* NB: For testing purposes.
*/
public Set<DiscoveryNode> getNodes() {
return Collections.unmodifiableSet(nodesFD.keySet());
}
/**
* make sure that nodes in clusterState are pinged. Any pinging to nodes which are not
* part of the cluster will be stopped
@ -129,7 +139,6 @@ public class NodesFaultDetection extends FaultDetection {
public void close() {
super.close();
stop();
transportService.removeHandler(PING_ACTION_NAME);
}
@Override

View File

@ -76,12 +76,6 @@ public class MembershipAction extends AbstractComponent {
transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler());
}
public void close() {
transportService.removeHandler(DISCOVERY_JOIN_ACTION_NAME);
transportService.removeHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME);
transportService.removeHandler(DISCOVERY_LEAVE_ACTION_NAME);
}
public void sendLeaveRequest(DiscoveryNode masterNode, DiscoveryNode node) {
transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME);
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.discovery.zen.ping;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
/**
@ -26,7 +27,7 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
*/
public interface PingContextProvider extends DiscoveryNodesProvider {
/** return true if this node has previously joined the cluster at least once. False if this is first join */
boolean nodeHasJoinedClusterOnce();
/** return the current cluster state of the node */
ClusterState clusterState();
}

View File

@ -20,30 +20,42 @@
package org.elasticsearch.discovery.zen.ping;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ElectMasterService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
public interface ZenPing extends LifecycleComponent {
void setPingContextProvider(PingContextProvider contextProvider);
void ping(PingListener listener, TimeValue timeout);
public interface PingListener {
interface PingListener {
void onPing(PingResponse[] pings);
/**
* called when pinging is done.
*
* @param pings ping result; *must* not be null
*/
void onPing(Collection<PingResponse> pings);
}
public static class PingResponse implements Streamable {
class PingResponse implements Streamable {
public static final PingResponse[] EMPTY = new PingResponse[0];
@ -59,29 +71,36 @@ public interface ZenPing extends LifecycleComponent {
private DiscoveryNode master;
private boolean hasJoinedOnce;
private long clusterStateVersion;
private PingResponse() {
}
/**
* @param node the node which this ping describes
* @param master the current master of the node
* @param clusterName the cluster name of the node
* @param hasJoinedOnce true if the node has successfully joined the cluster before
* @param node the node which this ping describes
* @param master the current master of the node
* @param clusterName the cluster name of the node
* @param clusterStateVersion the current cluster state version of that node
* ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION} for not recovered)
*/
public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, boolean hasJoinedOnce) {
public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, long clusterStateVersion) {
this.id = idGenerator.incrementAndGet();
this.node = node;
this.master = master;
this.clusterName = clusterName;
this.hasJoinedOnce = hasJoinedOnce;
this.clusterStateVersion = clusterStateVersion;
}
/**
* an always increasing unique identifier for this ping response.
* lower values mean older pings.
*/
public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterState state) {
this(node, master, state.getClusterName(),
state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) ?
ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION : state.version());
}
/**
* an always increasing unique identifier for this ping response.
* lower values mean older pings.
*/
public long id() {
return this.id;
}
@ -100,9 +119,11 @@ public interface ZenPing extends LifecycleComponent {
return master;
}
/** true if the node has successfully joined the cluster before */
public boolean hasJoinedOnce() {
return hasJoinedOnce;
/**
* the current cluster state version of that node ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION}
* for not recovered) */
public long getClusterStateVersion() {
return clusterStateVersion;
}
public static PingResponse readPingResponse(StreamInput in) throws IOException {
@ -118,7 +139,7 @@ public interface ZenPing extends LifecycleComponent {
if (in.readBoolean()) {
master = new DiscoveryNode(in);
}
this.hasJoinedOnce = in.readBoolean();
this.clusterStateVersion = in.readLong();
this.id = in.readLong();
}
@ -132,13 +153,14 @@ public interface ZenPing extends LifecycleComponent {
out.writeBoolean(true);
master.writeTo(out);
}
out.writeBoolean(hasJoinedOnce);
out.writeLong(clusterStateVersion);
out.writeLong(id);
}
@Override
public String toString() {
return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], hasJoinedOnce [" + hasJoinedOnce + "], cluster_name[" + clusterName.value() + "]}";
return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], cluster_state_version [" + clusterStateVersion
+ "], cluster_name[" + clusterName.value() + "]}";
}
}
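The state-based constructor is what maps an unrecovered cluster onto the sentinel version. A sketch of what a consumer observes (localNode, masterNode and clusterState are hypothetical):

ZenPing.PingResponse ping = new ZenPing.PingResponse(localNode, masterNode, clusterState);
long version = ping.getClusterStateVersion();
// version == ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION (-1) when the
// STATE_NOT_RECOVERED_BLOCK is present, otherwise clusterState.version()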
@ -146,7 +168,7 @@ public interface ZenPing extends LifecycleComponent {
/**
* a utility collection of pings where only the most recent ping is stored per node
*/
public static class PingCollection {
class PingCollection {
Map<DiscoveryNode, PingResponse> pings;
@ -171,15 +193,15 @@ public interface ZenPing extends LifecycleComponent {
}
/** adds multiple pings if newer than previous pings from the same node */
public synchronized void addPings(PingResponse[] pings) {
public synchronized void addPings(Iterable<PingResponse> pings) {
for (PingResponse ping : pings) {
addPing(ping);
}
}
/** serialize current pings to an array */
public synchronized PingResponse[] toArray() {
return pings.values().toArray(new PingResponse[pings.size()]);
/** serialize current pings to a list. It is guaranteed that the list contains one ping response per node */
public synchronized List<PingResponse> toList() {
return new ArrayList<>(pings.values());
}
/** the number of nodes for which there are known pings */
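A sketch of the per-node deduplication (assuming PingCollection exposes the addPing(PingResponse) used by addPings above; node and state are hypothetical):

ZenPing.PingCollection collection = new ZenPing.PingCollection();
collection.addPing(new ZenPing.PingResponse(node, null, state)); // lower id
collection.addPing(new ZenPing.PingResponse(node, null, state)); // higher id wins
assert collection.toList().size() == 1; // exactly one response per node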

View File

@ -23,17 +23,15 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicBoolean;
public class ZenPingService extends AbstractLifecycleComponent implements ZenPing {
public class ZenPingService extends AbstractLifecycleComponent {
private List<ZenPing> zenPings = Collections.emptyList();
@ -47,7 +45,6 @@ public class ZenPingService extends AbstractLifecycleComponent implements ZenPin
return this.zenPings;
}
@Override
public void setPingContextProvider(PingContextProvider contextProvider) {
if (lifecycle.started()) {
throw new IllegalStateException("Can't set nodes provider when started");
@ -78,60 +75,31 @@ public class ZenPingService extends AbstractLifecycleComponent implements ZenPin
}
}
public PingResponse[] pingAndWait(TimeValue timeout) {
final AtomicReference<PingResponse[]> response = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
ping(new PingListener() {
@Override
public void onPing(PingResponse[] pings) {
response.set(pings);
latch.countDown();
public ZenPing.PingCollection pingAndWait(TimeValue timeout) {
final ZenPing.PingCollection response = new ZenPing.PingCollection();
final CountDownLatch latch = new CountDownLatch(zenPings.size());
for (ZenPing zenPing : zenPings) {
final AtomicBoolean counted = new AtomicBoolean();
try {
zenPing.ping(pings -> {
response.addPings(pings);
if (counted.compareAndSet(false, true)) {
latch.countDown();
}
}, timeout);
} catch (Exception ex) {
logger.warn("Ping execution failed", ex);
if (counted.compareAndSet(false, true)) {
latch.countDown();
}
}
}, timeout);
}
try {
latch.await();
return response.get();
return response;
} catch (InterruptedException e) {
logger.trace("pingAndWait interrupted");
return null;
}
}
@Override
public void ping(PingListener listener, TimeValue timeout) {
List<? extends ZenPing> zenPings = this.zenPings;
CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings);
for (ZenPing zenPing : zenPings) {
try {
zenPing.ping(compoundPingListener, timeout);
} catch (EsRejectedExecutionException ex) {
logger.debug("Ping execution rejected", ex);
compoundPingListener.onPing(null);
}
}
}
private static class CompoundPingListener implements PingListener {
private final PingListener listener;
private final AtomicInteger counter;
private PingCollection responses = new PingCollection();
private CompoundPingListener(PingListener listener, List<? extends ZenPing> zenPings) {
this.listener = listener;
this.counter = new AtomicInteger(zenPings.size());
}
@Override
public void onPing(PingResponse[] pings) {
if (pings != null) {
responses.addPings(pings);
}
if (counter.decrementAndGet() == 0) {
listener.onPing(responses.toArray());
}
return response;
}
}
}
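The per-ZenPing AtomicBoolean is the subtle part of the rewrite above: each ping module must count the latch down exactly once, whether it calls back normally or throws synchronously. A generic, self-contained sketch of that fan-out pattern (hypothetical types, not ES API):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

final class FanOut {
    /** Runs every source, feeds results to the sink, and returns once each source has counted down once. */
    static <T> void await(List<Consumer<Consumer<T>>> sources, Consumer<T> sink)
            throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(sources.size());
        for (Consumer<Consumer<T>> source : sources) {
            AtomicBoolean counted = new AtomicBoolean();
            try {
                source.accept(result -> {
                    sink.accept(result);
                    if (counted.compareAndSet(false, true)) {
                        latch.countDown(); // normal completion
                    }
                });
            } catch (Exception ex) {
                if (counted.compareAndSet(false, true)) {
                    latch.countDown(); // a synchronous failure still counts down, once
                }
            }
        }
        latch.await();
    }
}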

View File

@ -44,7 +44,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.threadpool.ThreadPool;
@ -63,6 +63,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@ -160,18 +161,10 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
}
logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
List<DiscoveryNode> configuredTargetNodes = new ArrayList<>();
for (String host : hosts) {
try {
TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
for (TransportAddress address : addresses) {
configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#",
address, emptyMap(), emptySet(), getVersion().minimumCompatibilityVersion()));
}
} catch (Exception e) {
throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
}
for (final String host : hosts) {
configuredTargetNodes.addAll(resolveDiscoveryNodes(host, limitPortCounts, transportService,
() -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#"));
}
this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]);
@ -183,6 +176,32 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
threadFactory, threadPool.getThreadContext());
}
/**
* Resolves a host to a list of discovery nodes. The host is resolved into a transport
* address (or a collection of addresses if the number of ports is greater than one) and
* the transport addresses are used to create discovery nodes.
*
* @param host the host to resolve
* @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)
* @param transportService the transport service
* @param idGenerator the generator to supply unique ids for each discovery node
* @return a list of discovery nodes with resolved transport addresses
*/
public static List<DiscoveryNode> resolveDiscoveryNodes(final String host, final int limitPortCounts,
final TransportService transportService, final Supplier<String> idGenerator) {
List<DiscoveryNode> discoveryNodes = new ArrayList<>();
try {
TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
for (TransportAddress address : addresses) {
discoveryNodes.add(new DiscoveryNode(idGenerator.get(), address, emptyMap(), emptySet(),
Version.CURRENT.minimumCompatibilityVersion()));
}
} catch (Exception e) {
throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
}
return discoveryNodes;
}
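A sketch of an external call site (the host string and id supplier are illustrative):

AtomicInteger counter = new AtomicInteger();
List<DiscoveryNode> seeds = UnicastZenPing.resolveDiscoveryNodes(
    "10.0.0.1:9300", 1, transportService, () -> "#seed_" + counter.incrementAndGet());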
@Override
protected void doStart() {
}
@ -193,7 +212,6 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
@Override
protected void doClose() {
transportService.removeHandler(ACTION_NAME);
ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS);
try {
IOUtils.close(receivedResponses.values());
@ -219,8 +237,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
temporalResponses.clear();
}
public PingResponse[] pingAndWait(TimeValue duration) {
final AtomicReference<PingResponse[]> response = new AtomicReference<>();
// test only
Collection<PingResponse> pingAndWait(TimeValue duration) {
final AtomicReference<Collection<PingResponse>> response = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
ping(pings -> {
response.set(pings);
@ -256,7 +275,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
protected void doRun() throws Exception {
sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler);
sendPingsHandler.close();
listener.onPing(sendPingsHandler.pingCollection().toArray());
listener.onPing(sendPingsHandler.pingCollection().toList());
for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) {
logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node);
transportService.disconnectFromNode(node);
@ -559,8 +578,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
}
private PingResponse createPingResponse(DiscoveryNodes discoNodes) {
return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), clusterName,
contextProvider.nodeHasJoinedClusterOnce());
return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), contextProvider.clusterState());
}
static class UnicastPingResponse extends TransportResponse {

View File

@ -107,11 +107,6 @@ public class PublishClusterStateAction extends AbstractComponent {
transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, new CommitClusterStateRequestHandler());
}
public void close() {
transportService.removeHandler(SEND_ACTION_NAME);
transportService.removeHandler(COMMIT_ACTION_NAME);
}
public PendingClusterStatesQueue pendingStatesQueue() {
return pendingStatesQueue;
}

View File

@ -209,13 +209,6 @@ public final class NodeEnvironment implements Closeable {
for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
Path dataDir = environment.dataFiles()[dirIndex];
// TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory
if (readFromDataPathWithClusterName(dataDirWithClusterName)) {
DeprecationLogger deprecationLogger = new DeprecationLogger(startupTraceLogger);
deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " +
"Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir);
dataDir = dataDirWithClusterName;
}
Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
Files.createDirectories(dir);
@ -289,25 +282,6 @@ public final class NodeEnvironment implements Closeable {
}
}
// Visible for testing
/** Returns true if data should be read from the data path that includes the cluster name (ie, it has data in it) */
static boolean readFromDataPathWithClusterName(Path dataPathWithClusterName) throws IOException {
if (Files.exists(dataPathWithClusterName) == false || // If it doesn't exist
Files.isDirectory(dataPathWithClusterName) == false || // Or isn't a directory
dirEmpty(dataPathWithClusterName)) { // Or if it's empty
// No need to read from cluster-name folder!
return false;
}
// The "nodes" directory inside of the cluster name
Path nodesPath = dataPathWithClusterName.resolve(NODES_FOLDER);
if (Files.isDirectory(nodesPath)) {
// The cluster has data in the "nodes" so we should read from the cluster-named folder for now
return true;
}
// Hey the nodes directory didn't exist, so we can safely use whatever directory we feel appropriate
return false;
}
private static void releaseAndNullLocks(Lock[] locks) {
for (int i = 0; i < locks.length; i++) {
if (locks[i] != null) {

View File

@ -0,0 +1,88 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gateway;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
/**
* An abstract class that implements basic functionality for allocating
* shards to nodes based on shard copies that already exist in the cluster.
*
* Individual implementations of this class are responsible for providing
* the logic to determine to which nodes (if any) those shards are allocated.
*/
public abstract class BaseGatewayShardAllocator extends AbstractComponent {
public BaseGatewayShardAllocator(Settings settings) {
super(settings);
}
/**
* Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist.
* It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)}
* to make decisions on assigning shards to nodes.
*
* @param allocation the allocation state container object
*/
public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes routingNodes = allocation.routingNodes();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
final ShardRouting shard = unassignedIterator.next();
final UnassignedShardDecision unassignedShardDecision = makeAllocationDecision(shard, allocation, logger);
if (unassignedShardDecision.isDecisionTaken() == false) {
// no decision was taken by this allocator
continue;
}
if (unassignedShardDecision.getFinalDecisionSafe().type() == Decision.Type.YES) {
unassignedIterator.initialize(unassignedShardDecision.getAssignedNodeId(),
unassignedShardDecision.getAllocationId(),
shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE :
allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
allocation.changes());
} else {
unassignedIterator.removeAndIgnore(unassignedShardDecision.getAllocationStatus(), allocation.changes());
}
}
}
/**
* Make a decision on the allocation of an unassigned shard. This method is used by
* {@link #allocateUnassigned(RoutingAllocation)} to make decisions about whether or not
* the shard can be allocated by this allocator and if so, to which node it will be allocated.
*
* @param unassignedShard the unassigned shard to allocate
* @param allocation the current routing state
* @param logger the logger
* @return an {@link UnassignedShardDecision} with the final decision of whether to allocate and details of the decision
*/
public abstract UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard,
RoutingAllocation allocation,
Logger logger);
}
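The smallest possible implementation makes the template-method contract visible: an allocator that never takes a decision, so allocateUnassigned() skips every shard. A hypothetical sketch, not an allocator that ships with Elasticsearch:

public class NoOpGatewayAllocator extends BaseGatewayShardAllocator {
    public NoOpGatewayAllocator(Settings settings) {
        super(settings);
    }

    @Override
    public UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard,
                                                          RoutingAllocation allocation,
                                                          Logger logger) {
        // isDecisionTaken() == false, so allocateUnassigned() leaves the shard for other allocators
        return UnassignedShardDecision.DECISION_NOT_TAKEN;
    }
}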

View File

@ -34,7 +34,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -282,11 +281,8 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
routingTableBuilder.version(0);
// now, reroute
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
"state recovered");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
updatedState = ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build();
return allocationService.reroute(updatedState, "state recovered");
}
@Override

View File

@ -30,7 +30,6 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
@ -169,10 +168,8 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
ClusterState updatedState = ClusterState.builder(currentState).metaData(metaData).blocks(blocks).routingTable(routingTable).build();
// now, reroute
RoutingAllocation.Result routingResult = allocationService.reroute(
return allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTable).build(), "dangling indices allocated");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override

View File

@ -19,12 +19,12 @@
package org.elasticsearch.gateway;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
@ -32,19 +32,23 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.AsyncShardFetch.FetchResult;
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
import org.elasticsearch.index.shard.ShardStateMetaData;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@ -62,7 +66,7 @@ import java.util.stream.Collectors;
* nor does it allocate primaries when a primary shard failed and there is a valid replica
* copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}.
*/
public abstract class PrimaryShardAllocator extends AbstractComponent {
public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
private static final Function<String, String> INITIAL_SHARDS_PARSER = (value) -> {
switch (value) {
@ -94,110 +98,161 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
logger.debug("using initial_shards [{}]", NODE_INITIAL_SHARDS_SETTING.get(settings));
}
public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes routingNodes = allocation.routingNodes();
final MetaData metaData = allocation.metaData();
/**
* Is the allocator responsible for allocating the given {@link ShardRouting}?
*/
private static boolean isResponsibleFor(final ShardRouting shard) {
return shard.primary() // must be primary
&& shard.unassigned() // must be unassigned
// only handle either an existing store or a snapshot recovery
&& (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE
|| shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT);
}
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
final ShardRouting shard = unassignedIterator.next();
@Override
public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
final RoutingAllocation allocation,
final Logger logger) {
if (isResponsibleFor(unassignedShard) == false) {
// this allocator is not responsible for allocating this shard
return UnassignedShardDecision.DECISION_NOT_TAKEN;
}
if (shard.primary() == false) {
continue;
}
final boolean explain = allocation.debugDecision();
final FetchResult<NodeGatewayStartedShards> shardState = fetchData(unassignedShard, allocation);
if (shardState.hasData() == false) {
allocation.setHasPendingAsyncFetch();
return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
"still fetching shard state from the nodes in the cluster");
}
if (shard.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE &&
shard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) {
continue;
}
// don't create a new IndexSetting object for every shard as this could cause a lot of garbage
// on cluster restart if we allocate a boat load of shards
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index());
final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id());
final boolean snapshotRestore = unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);
final AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
if (shardState.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
continue;
}
final NodeShardsResult nodeShardsResult;
final boolean enoughAllocationsFound;
// don't create a new IndexSetting object for every shard as this could cause a lot of garbage
// on cluster restart if we allocate a boat load of shards
final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(shard.id());
final boolean snapshotRestore = shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);
final NodeShardsResult nodeShardsResult;
final boolean enoughAllocationsFound;
if (inSyncAllocationIds.isEmpty()) {
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocate a primary with an empty allocation id set, but index is new";
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
nodeShardsResult = buildVersionBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
if (snapshotRestore || recoverOnAnyNode) {
enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
} else {
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard);
if (inSyncAllocationIds.isEmpty()) {
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) :
"trying to allocated a primary with an empty allocation id set, but index is new";
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
nodeShardsResult = buildVersionBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(unassignedShard.shardId()), shardState, logger);
if (snapshotRestore || recoverOnAnyNode) {
enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
} else {
assert inSyncAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(shard.shardId()), inSyncAllocationIds, shardState);
enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, inSyncAllocationIds);
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", unassignedShard.index(),
unassignedShard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, unassignedShard);
} else {
assert inSyncAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
nodeShardsResult = buildAllocationIdBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(unassignedShard.shardId()), inSyncAllocationIds, shardState, logger);
enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(),
unassignedShard.id(), nodeShardsResult.orderedAllocationCandidates.size(), unassignedShard, inSyncAllocationIds);
}
if (enoughAllocationsFound == false) {
    if (snapshotRestore) {
        // let BalancedShardsAllocator take care of allocating this shard
        logger.debug("[{}][{}]: missing local data, will restore from [{}]",
            unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource());
        return UnassignedShardDecision.DECISION_NOT_TAKEN;
    } else if (recoverOnAnyNode) {
        // let BalancedShardsAllocator take care of allocating this shard
        logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id());
        return UnassignedShardDecision.DECISION_NOT_TAKEN;
    } else {
        // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary.
        // We could just be waiting for the node that holds the primary to start back up, in which case the allocation for
        // this shard will be picked up when the node joins and we do another allocation reroute
        logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]",
            unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound);
        return UnassignedShardDecision.noDecision(AllocationStatus.NO_VALID_SHARD_COPY,
            "shard was previously allocated, but no valid shard copy could be found amongst the current nodes in the cluster");
    }
}
final NodesToAllocate nodesToAllocate = buildNodesToAllocate(
allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, false
);
if (nodesToAllocate.yesNodeShards.isEmpty() == false) {
DecidedNode decidedNode = nodesToAllocate.yesNodeShards.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, decidedNode.nodeShardState.getNode());
final String nodeId = decidedNode.nodeShardState.getNode().getId();
return UnassignedShardDecision.yesDecision(
"the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]",
nodeId, decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain));
} else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
// The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard
// can be force-allocated to one of the nodes.
final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate(
allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, true
);
if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) {
final DecidedNode decidedNode = nodesToForceAllocate.yesNodeShards.get(0);
final NodeGatewayStartedShards nodeShardState = decidedNode.nodeShardState;
logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeShardState.getNode());
final String nodeId = nodeShardState.getNode().getId();
return UnassignedShardDecision.yesDecision(
"allocating the primary shard to node [" + nodeId+ "], which has a complete copy of the shard data",
nodeId,
nodeShardState.allocationId(),
buildNodeDecisions(nodesToForceAllocate, explain));
} else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToForceAllocate.throttleNodeShards);
return UnassignedShardDecision.throttleDecision(
"allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries",
buildNodeDecisions(nodesToForceAllocate, explain));
} else {
logger.debug("[{}][{}]: forced primary allocation denied [{}]",
unassignedShard.index(), unassignedShard.id(), unassignedShard);
return UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO,
"all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted",
buildNodeDecisions(nodesToForceAllocate, explain));
}
} else {
// we are throttling this, since we are allowed to allocate to this node but there are enough allocations
// taking place on the node currently, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToAllocate.throttleNodeShards);
return UnassignedShardDecision.throttleDecision(
"allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries",
buildNodeDecisions(nodesToAllocate, explain));
}
}
/**
* Builds a map of nodes to the corresponding allocation decisions for those nodes.
*/
private static Map<String, Decision> buildNodeDecisions(NodesToAllocate nodesToAllocate, boolean explain) {
if (explain == false) {
// not in explain mode, no need to return node level decisions
return null;
}
Map<String, Decision> nodeDecisions = new LinkedHashMap<>();
for (final DecidedNode decidedNode : nodesToAllocate.yesNodeShards) {
nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
}
for (final DecidedNode decidedNode : nodesToAllocate.throttleNodeShards) {
nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
}
for (final DecidedNode decidedNode : nodesToAllocate.noNodeShards) {
nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
}
return nodeDecisions;
}
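A note on the map type chosen above: LinkedHashMap preserves insertion order, so when the decisions are rendered (for example by an allocation explain output) the YES nodes appear before the THROTTLE and NO nodes. A minimal, self-contained sketch of that property (the demo class name is invented, not part of this change):

import java.util.LinkedHashMap;
import java.util.Map;

public class OrderedDecisionsDemo {
    public static void main(String[] args) {
        Map<String, String> nodeDecisions = new LinkedHashMap<>();
        nodeDecisions.put("node-a", "YES");        // inserted first, iterated first
        nodeDecisions.put("node-b", "THROTTLE");
        nodeDecisions.put("node-c", "NO");
        // unlike HashMap, iteration follows insertion order: node-a, node-b, node-c
        nodeDecisions.forEach((node, decision) -> System.out.println(node + " -> " + decision));
    }
}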
/**
@@ -205,8 +260,10 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
* entries with matching allocation id are always at the front of the list.
*/
protected NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
protected static NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard,
Set<String> ignoreNodes, Set<String> lastActiveAllocationIds,
FetchResult<NodeGatewayStartedShards> shardState,
Logger logger) {
LinkedList<NodeGatewayStartedShards> matchingNodeShardStates = new LinkedList<>();
LinkedList<NodeGatewayStartedShards> nonMatchingNodeShardStates = new LinkedList<>();
int numberOfAllocationsFound = 0;
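        // as documented above: entries whose allocation id is in lastActiveAllocationIds go to
        // matchingNodeShardStates; the rest go to nonMatchingNodeShardStates, which is only
        // consulted when matchAnyShard is true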
@@ -299,9 +356,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
List<NodeGatewayStartedShards> nodeShardStates,
ShardRouting shardRouting,
boolean forceAllocate) {
List<NodeGatewayStartedShards> yesNodeShards = new ArrayList<>();
List<NodeGatewayStartedShards> throttledNodeShards = new ArrayList<>();
List<NodeGatewayStartedShards> noNodeShards = new ArrayList<>();
List<DecidedNode> yesNodeShards = new ArrayList<>();
List<DecidedNode> throttledNodeShards = new ArrayList<>();
List<DecidedNode> noNodeShards = new ArrayList<>();
for (NodeGatewayStartedShards nodeShardState : nodeShardStates) {
RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId());
if (node == null) {
@@ -310,12 +367,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
Decision decision = forceAllocate ? allocation.deciders().canForceAllocatePrimary(shardRouting, node, allocation) :
allocation.deciders().canAllocate(shardRouting, node, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
throttledNodeShards.add(nodeShardState);
} else if (decision.type() == Decision.Type.NO) {
noNodeShards.add(nodeShardState);
DecidedNode decidedNode = new DecidedNode(nodeShardState, decision);
if (decision.type() == Type.THROTTLE) {
throttledNodeShards.add(decidedNode);
} else if (decision.type() == Type.NO) {
noNodeShards.add(decidedNode);
} else {
yesNodeShards.add(nodeShardState);
yesNodeShards.add(decidedNode);
}
}
return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards));
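A small aside on the Collections.unmodifiableList wrapping above: it hands callers read-only views, so the yes/throttle/no buckets cannot be mutated after construction. A self-contained sketch (demo class name invented):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ImmutableBucketsDemo {
    public static void main(String[] args) {
        List<String> yesNodes = new ArrayList<>();
        yesNodes.add("node-1");
        List<String> view = Collections.unmodifiableList(yesNodes);
        try {
            view.add("node-2");                    // mutating the view is rejected
        } catch (UnsupportedOperationException expected) {
            System.out.println("buckets are read-only once published");
        }
    }
}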
@@ -325,8 +383,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* Builds a list of previously started shards. If matchAnyShard is set to false, only shards with the highest shard version are added to
* the list. Otherwise, any existing shard is added to the list, but entries with highest version are always at the front of the list.
*/
NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
static NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
FetchResult<NodeGatewayStartedShards> shardState, Logger logger) {
final List<NodeGatewayStartedShards> allocationCandidates = new ArrayList<>();
int numberOfAllocationsFound = 0;
long highestVersion = ShardStateMetaData.NO_VERSION;
@@ -400,7 +458,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
&& IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings);
}
protected abstract AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
static class NodeShardsResult {
public final List<NodeGatewayStartedShards> orderedAllocationCandidates;
@@ -413,16 +471,28 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}
static class NodesToAllocate {
final List<NodeGatewayStartedShards> yesNodeShards;
final List<NodeGatewayStartedShards> throttleNodeShards;
final List<NodeGatewayStartedShards> noNodeShards;
final List<DecidedNode> yesNodeShards;
final List<DecidedNode> throttleNodeShards;
final List<DecidedNode> noNodeShards;
public NodesToAllocate(List<NodeGatewayStartedShards> yesNodeShards,
List<NodeGatewayStartedShards> throttleNodeShards,
List<NodeGatewayStartedShards> noNodeShards) {
public NodesToAllocate(List<DecidedNode> yesNodeShards, List<DecidedNode> throttleNodeShards, List<DecidedNode> noNodeShards) {
this.yesNodeShards = yesNodeShards;
this.throttleNodeShards = throttleNodeShards;
this.noNodeShards = noNodeShards;
}
}
/**
* This class encapsulates the shard state retrieved from a node and the decision that was made
* by the allocator for allocating to the node that holds the shard copy.
*/
private static class DecidedNode {
final NodeGatewayStartedShards nodeShardState;
final Decision decision;
private DecidedNode(NodeGatewayStartedShards nodeShardState, Decision decision) {
this.nodeShardState = nodeShardState;
this.decision = decision;
}
}
}
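To summarize the decision flow implemented in makeAllocationDecision above, here is a deliberately simplified, self-contained sketch; the enum and class names are invented for illustration and none of this is the Elasticsearch API:

import java.util.List;

public class PrimaryAllocationFlowSketch {
    enum NodeDecision { YES, THROTTLE, NO }

    // Mirrors the branching above: take the first YES; if every node with a copy said NO
    // (and none throttled), retry in force-allocation mode; otherwise throttle and wait.
    static String decide(List<NodeDecision> decisions, boolean forceAllocationWouldSucceed) {
        if (decisions.contains(NodeDecision.YES)) {
            return "allocate to the first YES node";
        }
        boolean anyThrottle = decisions.contains(NodeDecision.THROTTLE);
        if (anyThrottle == false && decisions.contains(NodeDecision.NO)) {
            return forceAllocationWouldSucceed ? "force-allocate the primary" : "no decision (DECIDERS_NO)";
        }
        return "throttle (DECIDERS_THROTTLED): recoveries are busy";
    }
}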
View File
@@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
@@ -31,24 +31,25 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.RoutingChangesObserver;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
*/
public abstract class ReplicaShardAllocator extends AbstractComponent {
public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
public ReplicaShardAllocator(Settings settings) {
super(settings);
@@ -96,7 +97,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
continue;
}
MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores, false);
if (matchingNodes.getNodeWithHighestMatch() != null) {
DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId());
DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch();
@@ -128,86 +129,88 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
}
public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes routingNodes = allocation.routingNodes();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
ShardRouting shard = unassignedIterator.next();
if (shard.primary()) {
continue;
}
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
if (shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
continue;
}
// pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
Decision decision = canBeAllocatedToAtLeastOneNode(shard, allocation);
if (decision.type() != Decision.Type.YES) {
logger.trace("{}: ignoring allocation, can't be allocated on any node", shard);
unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
continue;
}
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(shard, allocation);
if (shardStores.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard stores", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
continue; // still fetching
}
ShardRouting primaryShard = routingNodes.activePrimary(shard.shardId());
assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
if (primaryStore == null) {
// if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed)
// we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
// will try and recover from
// Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard);
continue;
}
MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
if (matchingNodes.getNodeWithHighestMatch() != null) {
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
// we only check on THROTTLE since we already checked on NO before
decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we are throttling this, but we have enough to allocate to this node, ignore it for now
unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
} else {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we found a match
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes());
}
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed
ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes());
}
}
}
/**
* Is the allocator responsible for allocating the given {@link ShardRouting}?
*/
private static boolean isResponsibleFor(final ShardRouting shard) {
return shard.primary() == false // must be a replica
&& shard.unassigned() // must be unassigned
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
&& shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED;
}
/**
* Check if the allocation of the replica is to be delayed. Compute the delay and if it is delayed, add it to the ignore unassigned list
* Note: we only care about replica in delayed allocation, since if we have an unassigned primary it
* will anyhow wait to find an existing copy of the shard to be allocated
* Note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService
*
* PUBLIC FOR TESTS!
*
* @param unassignedIterator iterator over unassigned shards
* @param shard the shard which might be delayed
*/
public void ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard, RoutingChangesObserver changes) {
if (shard.unassignedInfo().isDelayed()) {
logger.debug("{}: allocation of [{}] is delayed", shard.shardId(), shard);
unassignedIterator.removeAndIgnore(AllocationStatus.DELAYED_ALLOCATION, changes);
}
}
@Override
public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
final RoutingAllocation allocation,
final Logger logger) {
if (isResponsibleFor(unassignedShard) == false) {
// this allocator is not responsible for deciding on this shard
return UnassignedShardDecision.DECISION_NOT_TAKEN;
}
final RoutingNodes routingNodes = allocation.routingNodes();
final boolean explain = allocation.debugDecision();
// pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
Tuple<Decision, Map<String, Decision>> allocateDecision = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation, explain);
if (allocateDecision.v1().type() != Decision.Type.YES) {
logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard);
return UnassignedShardDecision.noDecision(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1()),
"all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard",
allocateDecision.v2());
}
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(unassignedShard, allocation);
if (shardStores.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard);
allocation.setHasPendingAsyncFetch();
return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
"still fetching shard state from the nodes in the cluster");
}
ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId());
assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
if (primaryStore == null) {
// if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed)
// we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
// will try and recover from
// Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", unassignedShard);
return UnassignedShardDecision.DECISION_NOT_TAKEN;
}
MatchingNodes matchingNodes = findMatchingNodes(unassignedShard, allocation, primaryStore, shardStores, explain);
assert explain == false || matchingNodes.nodeDecisions != null : "in explain mode, we must have individual node decisions";
if (matchingNodes.getNodeWithHighestMatch() != null) {
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
// we only check on THROTTLE since we already checked on NO before
Decision decision = allocation.deciders().canAllocate(unassignedShard, nodeWithHighestMatch, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
// we are throttling this, as we have enough other shards to allocate to this node, so ignore it for now
return UnassignedShardDecision.throttleDecision(
"returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one " +
"of those copies", matchingNodes.nodeDecisions);
} else {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
// we found a match
return UnassignedShardDecision.yesDecision(
"allocating to node [" + nodeWithHighestMatch.nodeId() + "] in order to re-use its unallocated persistent store",
nodeWithHighestMatch.nodeId(), null, matchingNodes.nodeDecisions);
}
} else if (matchingNodes.hasAnyData() == false && unassignedShard.unassignedInfo().isDelayed()) {
// if we didn't manage to find *any* data (regardless of matching sizes), and the replica is
// unassigned due to a node leaving, so we delay allocation of this replica to see if the
// node with the shard copy will rejoin so we can re-use the copy it has
logger.debug("{}: allocation of [{}] is delayed", unassignedShard.shardId(), unassignedShard);
return UnassignedShardDecision.noDecision(AllocationStatus.DELAYED_ALLOCATION,
"not allocating this shard, no nodes contain data for the replica and allocation is delayed");
}
return UnassignedShardDecision.DECISION_NOT_TAKEN;
}
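The delayed branch above waits for a departed node to rejoin rather than rebuilding the replica from scratch. A rough, self-contained sketch of that gate (the one-minute figure is the default of index.unassigned.node_left.delayed_timeout; the method and class names are invented):

import java.util.concurrent.TimeUnit;

public class DelayedAllocationSketch {
    static boolean shouldDelay(boolean anyNodeHasData, long nodeLeftAtMillis, long nowMillis) {
        long delayMillis = TimeUnit.MINUTES.toMillis(1);   // assumed default delayed_timeout of 1m
        return anyNodeHasData == false && (nowMillis - nodeLeftAtMillis) < delayMillis;
    }
}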
/**
@@ -215,10 +218,15 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
*
* Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one
* node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided
* YES or THROTTLE.
* YES or THROTTLE). If the explain flag is turned on AND the decision is NO or THROTTLE, then this method
* also returns a map of nodes to decisions (second value in the tuple) to use for explanations; if the explain
* flag is off, the second value in the return tuple will be null.
*/
private Decision canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
private Tuple<Decision, Map<String, Decision>> canBeAllocatedToAtLeastOneNode(ShardRouting shard,
RoutingAllocation allocation,
boolean explain) {
Decision madeDecision = Decision.NO;
Map<String, Decision> nodeDecisions = new HashMap<>();
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().getDataNodes().values()) {
RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
if (node == null) {
@@ -227,13 +235,16 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
// if we can't allocate it on a node, ignore it, for example, this handles
// cases for only allocating a replica after a primary
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (explain) {
nodeDecisions.put(node.nodeId(), decision);
}
if (decision.type() == Decision.Type.YES) {
return decision;
return Tuple.tuple(decision, null);
} else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) {
madeDecision = decision;
}
}
return madeDecision;
return Tuple.tuple(madeDecision, explain ? nodeDecisions : null);
}
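The Tuple convention above (overall decision in v1, optional per-node map in v2) can be illustrated with a self-contained stand-in; Pair replaces org.elasticsearch.common.collect.Tuple and the decision strings replace the Decision type, purely for illustration:

import java.util.HashMap;
import java.util.Map;

public class CanAllocateSketch {
    static final class Pair<A, B> {
        final A v1; final B v2;
        Pair(A v1, B v2) { this.v1 = v1; this.v2 = v2; }
    }

    // Best decision across nodes plus, only in explain mode, the per-node breakdown.
    static Pair<String, Map<String, String>> canAllocateAnywhere(Map<String, String> decisionByNode, boolean explain) {
        Map<String, String> nodeDecisions = new HashMap<>();
        String made = "NO";
        for (Map.Entry<String, String> e : decisionByNode.entrySet()) {
            if (explain) {
                nodeDecisions.put(e.getKey(), e.getValue());
            }
            if ("YES".equals(e.getValue())) {
                return new Pair<>("YES", null);            // short-circuit on the first YES
            } else if ("NO".equals(made) && "THROTTLE".equals(e.getValue())) {
                made = "THROTTLE";                         // THROTTLE outranks NO
            }
        }
        return new Pair<>(made, explain ? nodeDecisions : null);
    }
}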
/**
@@ -254,8 +265,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data,
boolean explain) {
ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
Map<String, Decision> nodeDecisions = new HashMap<>();
for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
DiscoveryNode discoNode = nodeStoreEntry.getKey();
TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
@@ -273,6 +286,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
// we only check for NO, since if this node is THROTTLING and it has enough "same data"
// then we will try and assign it next time
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (explain) {
nodeDecisions.put(node.nodeId(), decision);
}
if (decision.type() == Decision.Type.NO) {
continue;
}
@@ -297,7 +314,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
}
return new MatchingNodes(nodesToSize);
return new MatchingNodes(nodesToSize, explain ? nodeDecisions : null);
}
protected abstract AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation);
@@ -305,9 +322,12 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
static class MatchingNodes {
private final ObjectLongMap<DiscoveryNode> nodesToSize;
private final DiscoveryNode nodeWithHighestMatch;
@Nullable
private final Map<String, Decision> nodeDecisions;
public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize) {
public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize, @Nullable Map<String, Decision> nodeDecisions) {
this.nodesToSize = nodesToSize;
this.nodeDecisions = nodeDecisions;
long highestMatchSize = 0;
DiscoveryNode highestMatchNode = null;
@@ -340,5 +360,13 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
public boolean hasAnyData() {
return nodesToSize.isEmpty() == false;
}
/**
* The decisions map for all nodes with a shard copy, if available.
*/
@Nullable
public Map<String, Decision> getNodeDecisions() {
return nodeDecisions;
}
}
}
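The "node with highest match" selection that MatchingNodes performs above reduces to picking the node whose existing files overlap the primary store by the most bytes. A minimal sketch with plain collections (the real code uses hppc's ObjectLongMap; the names here are invented):

import java.util.Map;

public class HighestMatchSketch {
    static String nodeWithHighestMatch(Map<String, Long> matchingBytesByNode) {
        String best = null;
        long bestBytes = 0;                    // a node must match at least one byte to qualify
        for (Map.Entry<String, Long> e : matchingBytesByNode.entrySet()) {
            if (e.getValue() > bestBytes) {
                bestBytes = e.getValue();
                best = e.getKey();
            }
        }
        return best;                           // null when no node has any matching data
    }
}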
View File
@@ -789,30 +789,30 @@ public class InternalEngine extends Engine {
} catch (Exception e) {
throw new FlushFailedEngineException(shardId, e);
}
}
/*
 * we have to inc-ref the store here since if the engine is closed by a tragic event
 * we don't acquire the write lock and wait until we have exclusive access. This might also
 * dec the store reference which can essentially close the store and unless we can inc the reference
 * we can't use it.
 */
store.incRef();
try {
    // reread the last committed segment infos
    lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
} catch (Exception e) {
    if (isClosed.get() == false) {
        try {
            logger.warn("failed to read latest segment infos on flush", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        if (Lucene.isCorruptionException(e)) {
            throw new FlushFailedEngineException(shardId, e);
        }
    }
} finally {
    store.decRef();
}
newCommitId = lastCommittedSegmentInfos.getId();
} catch (FlushFailedEngineException ex) {
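The inc-ref/dec-ref discipline in the flush path above follows a standard pattern: take a reference before touching the store so a concurrent tragic close cannot release it mid-read, then release in a finally block. A self-contained sketch of the pattern (all names hypothetical; the real Store API differs):

public class RefCountSketch {
    static final class RefCountedResource {
        private int refCount = 1;
        synchronized void incRef() {
            if (refCount <= 0) {
                throw new IllegalStateException("already closed");
            }
            refCount++;
        }
        synchronized void decRef() {
            if (--refCount == 0) {
                // release the underlying resource exactly once
            }
        }
    }

    static void readSegmentInfos(RefCountedResource store) {
        store.incRef();                        // keep the store alive for the read
        try {
            // ... read last committed segment infos ...
        } finally {
            store.decRef();                    // guaranteed even if the read throws
        }
    }
}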
Some files were not shown because too many files have changed in this diff.