Merge branch 'master' into cli-parsing

Ryan Ernst 2016-03-11 10:45:05 -08:00
commit 591fb8f028
346 changed files with 6188 additions and 2984 deletions


@ -116,6 +116,7 @@ subprojects {
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
"org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
]
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->


@ -0,0 +1,98 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
/**
* Runs LoggerUsageCheck on a set of directories.
*/
public class LoggerUsageTask extends LoggedExec {
/**
* We use a simple "marker" file, touched when the task succeeds, as the
* task output. Its timestamp is compared against the modified time of the
* inputs (i.e. the jars/class files) to decide whether the task is up to date.
*/
private File successMarker = new File(project.buildDir, 'markers/loggerUsage')
private FileCollection classpath;
private List<File> classDirectories;
public LoggerUsageTask() {
project.afterEvaluate {
dependsOn(classpath)
description = "Runs LoggerUsageCheck on ${classDirectories}"
executable = new File(project.javaHome, 'bin/java')
if (classDirectories == null) {
classDirectories = []
if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) {
classDirectories += [project.sourceSets.main.output.classesDir]
dependsOn project.tasks.classes
}
if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) {
classDirectories += [project.sourceSets.test.output.classesDir]
dependsOn project.tasks.testClasses
}
}
doFirst({
args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker')
getClassDirectories().each {
args it.getAbsolutePath()
}
})
doLast({
successMarker.parentFile.mkdirs()
successMarker.setText("", 'UTF-8')
})
}
}
@InputFiles
FileCollection getClasspath() {
return classpath
}
void setClasspath(FileCollection classpath) {
this.classpath = classpath
}
@InputFiles
List<File> getClassDirectories() {
return classDirectories
}
void setClassDirectories(List<File> classDirectories) {
this.classDirectories = classDirectories
}
@OutputFile
File getSuccessMarker() {
return successMarker
}
void setSuccessMarker(File successMarker) {
this.successMarker = successMarker
}
}
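
What the task executes is an ordinary java process: the executable is bin/java from the configured JVM, -cp is the resolved checker classpath, and every configured class directory is appended as a positional argument (see the doFirst block above). A rough, hypothetical sketch of the equivalent invocation in plain Java, with placeholder paths that are not taken from this commit:

import java.io.File;
import java.io.IOException;

public class RunLoggerUsageChecker {
    public static void main(String[] args) throws IOException, InterruptedException {
        String javaHome = System.getProperty("java.home");                 // the task uses project.javaHome
        String checkerClasspath = "build/logger-usage-deps.classpath";     // placeholder for the loggerUsagePlugin configuration
        Process process = new ProcessBuilder(
                new File(javaHome, "bin/java").getPath(),
                "-cp", checkerClasspath,
                "org.elasticsearch.test.loggerusage.ESLoggerUsageChecker", // main class the task runs
                "build/classes/main",                                      // placeholder class directories
                "build/classes/test")
                .inheritIO()
                .start();
        System.exit(process.waitFor());
    }
}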


@ -34,6 +34,7 @@ class PrecommitTasks {
configureForbiddenApis(project),
configureCheckstyle(project),
configureNamingConventions(project),
configureLoggerUsage(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('jarHell', JarHellTask.class),
@ -64,20 +65,21 @@ class PrecommitTasks {
internalRuntimeForbidden = true
failOnUnsupportedJava = false
bundledSignatures = ['jdk-unsafe', 'jdk-deprecated']
signaturesURLs = [getClass().getResource('/forbidden/all-signatures.txt')]
signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
getClass().getResource('/forbidden/es-all-signatures.txt')]
suppressAnnotations = ['**.SuppressForbidden']
}
Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
if (mainForbidden != null) {
mainForbidden.configure {
bundledSignatures += 'jdk-system-out'
signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
signaturesURLs += getClass().getResource('/forbidden/es-core-signatures.txt')
}
}
Task testForbidden = project.tasks.findByName('forbiddenApisTest')
if (testForbidden != null) {
testForbidden.configure {
signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
}
}
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
@ -117,4 +119,18 @@ class PrecommitTasks {
}
return null
}
private static Task configureLoggerUsage(Project project) {
Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class)
project.configurations.create('loggerUsagePlugin')
project.dependencies.add('loggerUsagePlugin',
"org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}")
loggerUsageTask.configure {
classpath = project.configurations.loggerUsagePlugin
}
return loggerUsageTask
}
}
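
The rules the checker applies are not spelled out in this diff, but the logging cleanups later in the commit (JNANatives, TransportReplicationAction, TransportClientNodesService and others) show the style it is meant to enforce: parameterized log messages whose {} placeholders line up with the supplied arguments, instead of eager string concatenation. A small illustrative sketch, assuming the ESLogger/Loggers API that those call sites use:

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

public class LoggingStyleExample {
    private static final ESLogger logger = Loggers.getLogger(LoggingStyleExample.class);

    void onConnectFailure(String nodeName, Exception e) {
        // Discouraged: the message is built eagerly even when WARN is disabled,
        // and there are no placeholders for a checker to validate.
        logger.warn("failed to connect to node [" + nodeName + "]", e);

        // Preferred: the throwable comes right after the message, followed by
        // one argument per {} placeholder, as in the call sites rewritten below.
        logger.warn("failed to connect to node [{}]", e, nodeName);
    }
}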


@ -0,0 +1,30 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057
@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom
java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests
@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
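
Each entry bans a JDK or Lucene method and points at the sanctioned replacement; an @defaultMessage line applies to the signatures that follow it. A hypothetical before/after sketch for two of the entries above (PathUtils.get and Randomness.get are the replacements the messages name; the exact overloads used here are an assumption):

import java.nio.file.Path;
import java.util.Random;

import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.io.PathUtils;

public class ForbiddenSignaturesExample {
    Path dataDir() {
        // Forbidden: java.nio.file.Paths.get("/var/data")
        return PathUtils.get("/var/data");  // replacement named by the first entry
    }

    Random random() {
        // Forbidden: new java.util.Random(), not a reproducible source of randomness
        return Randomness.get();            // replacement named by the @defaultMessage entry
    }
}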


@ -33,9 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()
java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
@defaultMessage Specify a location for the temp file/directory instead.
java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[])
java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[])
@ -48,9 +45,6 @@ java.io.ObjectInput
java.nio.file.Files#isHidden(java.nio.file.Path) @ Dependent on the operating system, use FileSystemUtils.isHidden instead
java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057
@defaultMessage Resolve hosts explicitly to the address(es) you want with InetAddress.
java.net.InetSocketAddress#<init>(java.lang.String,int)
java.net.Socket#<init>(java.lang.String,int)
@ -89,9 +83,6 @@ java.lang.Class#getDeclaredMethods() @ Do not violate java's access system: Use
java.lang.reflect.AccessibleObject#setAccessible(boolean)
java.lang.reflect.AccessibleObject#setAccessible(java.lang.reflect.AccessibleObject[], boolean)
@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
@defaultMessage this method needs special permission
java.lang.Thread#getAllStackTraces()
@ -112,8 +103,3 @@ java.util.Collections#EMPTY_MAP
java.util.Collections#EMPTY_SET
java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom
java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests


@ -31,6 +31,7 @@ import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.http.HttpStats;
import org.elasticsearch.indices.NodeIndicesStats;
import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
import org.elasticsearch.ingest.IngestStats;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.monitor.os.OsStats;
@ -81,6 +82,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
@Nullable
private DiscoveryStats discoveryStats;
@Nullable
private IngestStats ingestStats;
NodeStats() {
}
@ -89,7 +93,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
@Nullable FsInfo fs, @Nullable TransportStats transport, @Nullable HttpStats http,
@Nullable AllCircuitBreakerStats breaker,
@Nullable ScriptStats scriptStats,
@Nullable DiscoveryStats discoveryStats) {
@Nullable DiscoveryStats discoveryStats,
@Nullable IngestStats ingestStats) {
super(node);
this.timestamp = timestamp;
this.indices = indices;
@ -103,6 +108,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
this.breaker = breaker;
this.scriptStats = scriptStats;
this.discoveryStats = discoveryStats;
this.ingestStats = ingestStats;
}
public long getTimestamp() {
@ -187,6 +193,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
return this.discoveryStats;
}
@Nullable
public IngestStats getIngestStats() {
return ingestStats;
}
public static NodeStats readNodeStats(StreamInput in) throws IOException {
NodeStats nodeInfo = new NodeStats();
nodeInfo.readFrom(in);
@ -224,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
scriptStats = in.readOptionalStreamable(ScriptStats::new);
discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
ingestStats = in.readOptionalWritable(IngestStats.PROTO);
}
@Override
@ -282,6 +293,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
out.writeOptionalStreamable(breaker);
out.writeOptionalStreamable(scriptStats);
out.writeOptionalStreamable(discoveryStats);
out.writeOptionalWriteable(ingestStats);
}
@Override
@ -337,6 +349,10 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
getDiscoveryStats().toXContent(builder, params);
}
if (getIngestStats() != null) {
getIngestStats().toXContent(builder, params);
}
return builder;
}
}


@ -42,6 +42,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
private boolean breaker;
private boolean script;
private boolean discovery;
private boolean ingest;
public NodesStatsRequest() {
}
@ -69,6 +70,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
this.breaker = true;
this.script = true;
this.discovery = true;
this.ingest = true;
return this;
}
@ -87,6 +89,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
this.breaker = false;
this.script = false;
this.discovery = false;
this.ingest = false;
return this;
}
@ -250,6 +253,17 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
return this;
}
public boolean ingest() {
return ingest;
}
/**
* Should ingest statistics be returned.
*/
public NodesStatsRequest ingest(boolean ingest) {
this.ingest = ingest;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
@ -265,6 +279,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
breaker = in.readBoolean();
script = in.readBoolean();
discovery = in.readBoolean();
ingest = in.readBoolean();
}
@Override
@ -281,6 +296,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
out.writeBoolean(breaker);
out.writeBoolean(script);
out.writeBoolean(discovery);
out.writeBoolean(ingest);
}
}


@ -137,4 +137,12 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder<Nodes
request.discovery(discovery);
return this;
}
/**
* Should ingest statistics be returned.
*/
public NodesStatsRequestBuilder ingest(boolean ingest) {
request.ingest(ingest);
return this;
}
}
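
Together, the request, builder, response and transport changes let callers opt in to the new ingest section. A hedged usage sketch; the prepareNodesStats() entry point and getNodes() accessor are the usual client API of this period and are not part of this diff:

import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.ingest.IngestStats;

public class IngestStatsUsage {
    static void inspectIngestStats(Client client) {
        NodesStatsResponse response = client.admin().cluster()
                .prepareNodesStats()   // builds a NodesStatsRequestBuilder
                .ingest(true)          // the flag introduced in this commit
                .get();
        for (NodeStats node : response.getNodes()) {
            IngestStats ingest = node.getIngestStats();  // @Nullable: null when the section was not requested
            if (ingest != null) {
                // examine per-node ingest statistics here
            }
        }
    }
}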


@ -80,7 +80,8 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
NodesStatsRequest request = nodeStatsRequest.request;
return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(),
request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery());
request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery(),
request.ingest());
}
@Override


@ -99,7 +99,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
List<ShardStats> shardsStats = new ArrayList<>();
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {


@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
@Override
protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
IndexService service = indicesService.indexService(shardRouting.getIndexName());
IndexService service = indicesService.indexService(shardRouting.index());
if (service != null) {
IndexShard shard = service.getShardOrNull(shardRouting.id());
boolean clearedAtLeastOne = false;


@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
@Override
protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName());
IndexService indexService = indicesService.indexServiceSafe(shardRouting.index());
IndexShard indexShard = indexService.getShard(shardRouting.id());
return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
}


@ -47,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
@ -104,8 +105,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
@Override
protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) {
final IndexService indexService = indicesService.indexServiceSafe(request.index());
final IndexShard indexShard = indexService.getShard(request.shardId().id());
ShardId shardId = request.shardId();
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
final IndexShard indexShard = indexService.getShard(shardId.getId());
long[] preVersions = new long[request.items().length];
VersionType[] preVersionTypes = new VersionType[request.items().length];


@ -112,7 +112,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
logger.error("failed to execute pipeline for a bulk request", throwable);
listener.onFailure(throwable);
} else {
long ingestTookInMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - ingestStartTimeInNanos, TimeUnit.NANOSECONDS);
long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
if (bulkRequest.requests().isEmpty()) {

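The two conversions are equivalent; the new form simply states the direction of the conversion (nanoseconds to milliseconds) directly. For reference, in plain JDK terms:

import java.util.concurrent.TimeUnit;

public class TookTimeConversion {
    public static void main(String[] args) {
        long ingestStartTimeInNanos = System.nanoTime();
        // ... pipeline execution would happen here ...
        long elapsedNanos = System.nanoTime() - ingestStartTimeInNanos;

        long oldForm = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);
        long newForm = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);
        assert oldForm == newForm;  // same value, the second reads source unit to target unit
    }
}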

@ -32,8 +32,6 @@ import org.elasticsearch.search.SearchShardTarget;
import java.io.IOException;
import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
/**
* Represents a failure to search on a specific shard.
*/
@ -106,7 +104,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
@Override
public int shardId() {
if (shardTarget != null) {
return shardTarget.shardId();
return shardTarget.shardId().id();
}
return -1;
}
@ -133,7 +131,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
@Override
public void readFrom(StreamInput in) throws IOException {
if (in.readBoolean()) {
shardTarget = readSearchShardTarget(in);
shardTarget = new SearchShardTarget(in);
}
reason = in.readString();
status = RestStatus.readFrom(in);


@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
throw new IllegalArgumentException("suggest content missing");
}
final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(),
indexService.fieldData(), request.shardId().getIndexName(), request.shardId().id());
indexService.fieldData(), request.shardId());
final Suggest result = suggestPhase.execute(context, searcher.searcher());
return new ShardSuggestResponse(request.shardId(), result);
}


@ -53,6 +53,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
@ -269,7 +270,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
try {
channel.sendResponse(e);
} catch (Throwable e1) {
logger.warn("Failed to send response for " + actionName, e1);
logger.warn("Failed to send response for {}", e1, actionName);
}
}
});
@ -372,18 +373,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
private void failReplicaIfNeeded(Throwable t) {
String index = request.shardId().getIndex().getName();
Index index = request.shardId().getIndex();
int shardId = request.shardId().id();
logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
if (ignoreReplicaException(t) == false) {
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
return;
}
IndexShard indexShard = indexService.getShardOrNull(shardId);
if (indexShard == null) {
logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
return;
}
indexShard.failShard(actionName + " failed on replica", t);
@ -394,7 +395,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
try {
channel.sendResponse(t);
} catch (IOException responseException) {
logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
logger.warn("actual Exception", t);
}
}
@ -1106,7 +1107,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
try {
channel.sendResponse(finalResponse);
} catch (IOException responseException) {
logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
}
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on all replicas [{}] for request [{}]", transportReplicaAction, shardId, replicaRequest);


@ -27,6 +27,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
@ -42,8 +43,8 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
protected TimeValue timeout = DEFAULT_TIMEOUT;
protected String index;
// -1 means it's not set; allows explicitly directing a request to a specific shard
protected int shardId = -1;
// null means it's not set; allows explicitly directing a request to a specific shard
protected ShardId shardId = null;
private String concreteIndex;
@ -115,7 +116,11 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
shardId = in.readInt();
if (in.readBoolean()) {
shardId = ShardId.readShardId(in);
} else {
shardId = null;
}
timeout = TimeValue.readTimeValue(in);
concreteIndex = in.readOptionalString();
}
@ -124,7 +129,7 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
out.writeInt(shardId);
out.writeOptionalStreamable(shardId);
timeout.writeTo(out);
out.writeOptionalString(concreteIndex);
}
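
The read and write sides above stay symmetric: writeOptionalStreamable writes a presence boolean followed by the value's own writeTo, which is what the hand-written readFrom consumes. A sketch of that symmetry, using only calls that appear in this diff (the helper class itself is hypothetical):

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

class OptionalShardIdWireFormat {
    // What out.writeOptionalStreamable(shardId) puts on the wire.
    static void write(StreamOutput out, ShardId shardId) throws IOException {
        out.writeBoolean(shardId != null);  // presence flag
        if (shardId != null) {
            shardId.writeTo(out);           // the value's own serialization
        }
    }

    // The mirror image, matching the hand-written readFrom above.
    static ShardId read(StreamInput in) throws IOException {
        return in.readBoolean() ? ShardId.readShardId(in) : null;
    }
}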


@ -172,7 +172,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
return;
}
request.shardId = shardIt.shardId().id();
request.shardId = shardIt.shardId();
DiscoveryNode node = nodes.get(shard.currentNodeId());
transportService.sendRequest(node, shardActionName, request, transportOptions(), new BaseTransportResponseHandler<Response>() {


@ -75,12 +75,12 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
final IndexShard indexShard = indexService.getShard(shardId.id());
for (int i = 0; i < request.locations.size(); i++) {
TermVectorsRequest termVectorsRequest = request.requests.get(i);
try {
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.getShard(shardId.id());
TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
response.add(request.locations.get(i), termVectorsResponse);


@ -51,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
@ -147,8 +148,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
@Override
protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) {
if (request.shardId() != -1) {
return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId()).primaryShardIt();
if (request.getShardId() != null) {
return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt();
}
ShardIterator shardIterator = clusterService.operationRouting()
.indexShards(clusterState, request.concreteIndex(), request.type(), request.id(), request.routing());
@ -167,8 +168,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
}
protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
final IndexShard indexShard = indexService.getShard(request.shardId());
final ShardId shardId = request.getShardId();
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
final IndexShard indexShard = indexService.getShard(shardId.getId());
final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
switch (result.operation()) {
case UPSERT:
@ -194,7 +196,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
if (e instanceof VersionConflictEngineException) {
if (retryCount < request.retryOnConflict()) {
logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]",
retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id());
retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id());
threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
@Override
protected void doRun() {
@ -267,9 +269,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
break;
case NONE:
UpdateResponse update = result.action();
IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex());
IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex());
if (indexServiceOrNull != null) {
IndexShard shard = indexService.getShardOrNull(request.shardId());
IndexShard shard = indexService.getShardOrNull(shardId.getId());
if (shard != null) {
shard.noopUpdate(request.type());
}


@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptParameterParser;
import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
@ -88,7 +89,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
}
public UpdateRequest(String index, String type, String id) {
this.index = index;
super(index);
this.type = type;
this.id = id;
}
@ -195,7 +196,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return parent;
}
int shardId() {
public ShardId getShardId() {
return this.shardId;
}


@ -76,7 +76,7 @@ class JNANatives {
softLimit = rlimit.rlim_cur.longValue();
hardLimit = rlimit.rlim_max.longValue();
} else {
logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError()));
logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError()));
}
}
} catch (UnsatisfiedLinkError e) {
@ -85,19 +85,20 @@ class JNANatives {
}
// mlockall failed for some reason
logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno , errMsg);
logger.warn("This can result in part of the JVM being swapped out.");
if (errno == JNACLibrary.ENOMEM) {
if (rlimitSuccess) {
logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit));
if (Constants.LINUX) {
// give specific instructions for the linux case to make it easy
String user = System.getProperty("user.name");
logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
"\t# allow user '" + user + "' mlockall\n" +
"\t" + user + " soft memlock unlimited\n" +
"\t" + user + " hard memlock unlimited"
);
"\t# allow user '{}' mlockall\n" +
"\t{} soft memlock unlimited\n" +
"\t{} hard memlock unlimited",
user, user, user
);
logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
}
} else {
@ -155,7 +156,7 @@ class JNANatives {
// the amount of memory we wish to lock, plus a small overhead (1MB).
SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024));
if (!kernel.SetProcessWorkingSetSize(process, size, size)) {
logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError());
logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", Native.getLastError());
} else {
JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation();
long address = 0;
@ -188,7 +189,7 @@ class JNANatives {
if (result) {
logger.debug("console ctrl handler correctly set");
} else {
logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:");
logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError());
}
} catch (UnsatisfiedLinkError e) {
// this will have already been logged by Kernel32Library, no need to repeat it


@ -200,7 +200,7 @@ final class JVMCheck {
HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
if (bug != null && bug.check()) {
if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) {
Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get());
Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get());
} else {
throw new RuntimeException(bug.getErrorMessage());
}


@ -394,7 +394,7 @@ final class Seccomp {
method = 0;
int errno1 = Native.getLastError();
if (logger.isDebugEnabled()) {
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)...");
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1));
}
if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
int errno2 = Native.getLastError();


@ -119,7 +119,7 @@ public class TransportClientNodesService extends AbstractComponent {
this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);
if (logger.isDebugEnabled()) {
logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
}
if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
@ -318,7 +318,7 @@ public class TransportClientNodesService extends AbstractComponent {
transportService.connectToNode(node);
} catch (Throwable e) {
it.remove();
logger.debug("failed to connect to discovered node [" + node + "]", e);
logger.debug("failed to connect to discovered node [{}]", e, node);
}
}
}


@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.index.Index;
import java.util.ArrayList;
import java.util.Collections;
@ -120,7 +121,7 @@ public class ClusterChangedEvent {
/**
* Returns the indices deleted in this event
*/
public List<String> indicesDeleted() {
public List<Index> indicesDeleted() {
// If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected
// master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data;
// rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous
@ -131,17 +132,18 @@ public class ClusterChangedEvent {
if (metaDataChanged() == false || isNewCluster()) {
return Collections.emptyList();
}
List<String> deleted = null;
for (ObjectCursor<String> cursor : previousState.metaData().indices().keys()) {
String index = cursor.value;
if (!state.metaData().hasIndex(index)) {
List<Index> deleted = null;
for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) {
IndexMetaData index = cursor.value;
IndexMetaData current = state.metaData().index(index.getIndex().getName());
if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) {
if (deleted == null) {
deleted = new ArrayList<>();
}
deleted.add(index);
deleted.add(index.getIndex());
}
}
return deleted == null ? Collections.<String>emptyList() : deleted;
return deleted == null ? Collections.<Index>emptyList() : deleted;
}
/**

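The move from index names to IndexMetaData/UUID comparison is what makes delete-then-recreate visible: the name survives across the two states but the UUID changes, so the previous generation of the index still has to be reported as deleted. A hedged sketch of the check for a single index, using only calls that appear in this hunk (the helper itself is hypothetical):

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;

class IndexDeletionCheck {
    // Assumes the index existed in previousState, as in the loop above, which only
    // visits indices of the previous cluster state.
    static boolean wasDeleted(ClusterState previousState, ClusterState state, String name) {
        IndexMetaData previous = previousState.metaData().index(name);
        IndexMetaData current = state.metaData().index(name);
        // Gone entirely, or re-created under the same name with a fresh UUID:
        // either way the index from the previous state no longer exists.
        return current == null || previous.getIndexUUID().equals(current.getIndexUUID()) == false;
    }
}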

@ -136,6 +136,7 @@ public class ClusterModule extends AbstractModule {
bind(AllocationService.class).asEagerSingleton();
bind(DiscoveryNodeService.class).asEagerSingleton();
bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton();
bind(NodeConnectionsService.class).asEagerSingleton();
bind(OperationRouting.class).asEagerSingleton();
bind(MetaDataCreateIndexService.class).asEagerSingleton();
bind(MetaDataDeleteIndexService.class).asEagerSingleton();


@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.TaskManager;
import java.util.List;
@ -154,9 +153,4 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
* @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue
*/
TimeValue getMaxTaskWaitTime();
/**
* Returns task manager created in the cluster service
*/
TaskManager getTaskManager();
}


@ -0,0 +1,156 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.KeyedLock;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledFuture;
/**
* This component is responsible for connecting to nodes once they are added to the cluster state, and for disconnecting
* from them when they are removed. It also periodically checks that all connections are still open and restores them if needed.
* Note that this component is *not* responsible for removing nodes from the cluster if they disconnect / do not respond
* to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection
* is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}.
*/
public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> {
public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
Setting.positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
private final ThreadPool threadPool;
private final TransportService transportService;
// map between current node and the number of failed connection attempts. 0 means successfully connected.
// if a node doesn't appear in this list it shouldn't be monitored
private ConcurrentMap<DiscoveryNode, Integer> nodes = ConcurrentCollections.newConcurrentMap();
final private KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>();
private final TimeValue reconnectInterval;
private volatile ScheduledFuture<?> backgroundFuture = null;
@Inject
public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings);
}
public void connectToAddedNodes(ClusterChangedEvent event) {
// TODO: do this in parallel (and wait)
for (final DiscoveryNode node : event.nodesDelta().addedNodes()) {
try (Releasable ignored = nodeLocks.acquire(node)) {
Integer current = nodes.put(node, 0);
assert current == null : "node " + node + " was added in event but already in internal nodes";
validateNodeConnected(node);
}
}
}
public void disconnectFromRemovedNodes(ClusterChangedEvent event) {
for (final DiscoveryNode node : event.nodesDelta().removedNodes()) {
try (Releasable ignored = nodeLocks.acquire(node)) {
Integer current = nodes.remove(node);
assert current != null : "node " + node + " was removed in event but not in internal nodes";
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
}
}
}
}
void validateNodeConnected(DiscoveryNode node) {
assert nodeLocks.isHeldByCurrentThread(node) : "validateNodeConnected must be called under lock";
if (lifecycle.stoppedOrClosed() ||
nodes.containsKey(node) == false) { // we double check existence of node since connectToNode might take time...
// nothing to do
} else {
try {
// connecting to an already connected node is a noop
transportService.connectToNode(node);
nodes.put(node, 0);
} catch (Exception e) {
Integer nodeFailureCount = nodes.get(node);
assert nodeFailureCount != null : node + " didn't have a counter in nodes map";
nodeFailureCount = nodeFailureCount + 1;
// log every 6th failure
if ((nodeFailureCount % 6) == 1) {
logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
}
nodes.put(node, nodeFailureCount);
}
}
}
class ConnectionChecker extends AbstractRunnable {
@Override
public void onFailure(Throwable t) {
logger.warn("unexpected error while checking for node reconnects", t);
}
protected void doRun() {
for (DiscoveryNode node : nodes.keySet()) {
try (Releasable ignored = nodeLocks.acquire(node)) {
validateNodeConnected(node);
}
}
}
@Override
public void onAfter() {
if (lifecycle.started()) {
backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
}
}
}
@Override
protected void doStart() {
backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ConnectionChecker());
}
@Override
protected void doStop() {
FutureUtils.cancel(backgroundFuture);
}
@Override
protected void doClose() {
}
}
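
Work on an individual node is serialized through a KeyedLock rather than a single global lock, so connecting, disconnecting and the periodic validation can run concurrently for different nodes while never interleaving for the same node. A minimal sketch of the idiom, assuming the KeyedLock/Releasable API imported above (the no-argument constructor is an assumption; acquire returning a Releasable is shown in the class itself):

import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.concurrent.KeyedLock;

class PerKeyWork {
    private final KeyedLock<String> locks = new KeyedLock<>();

    void run(String key, Runnable work) {
        // acquire() blocks until the lock for this particular key is free; other
        // keys are unaffected. Closing the Releasable releases the per-key lock.
        try (Releasable ignored = locks.acquire(key)) {
            work.run();
        }
    }
}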


@ -69,15 +69,17 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
private final State state;
private final SnapshotId snapshotId;
private final boolean includeGlobalState;
private final boolean partial;
private final ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards;
private final List<String> indices;
private final ImmutableOpenMap<String, List<ShardId>> waitingIndices;
private final long startTime;
public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List<String> indices, long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
public Entry(SnapshotId snapshotId, boolean includeGlobalState, boolean partial, State state, List<String> indices, long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
this.state = state;
this.snapshotId = snapshotId;
this.includeGlobalState = includeGlobalState;
this.partial = partial;
this.indices = indices;
this.startTime = startTime;
if (shards == null) {
@ -90,7 +92,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
}
public Entry(Entry entry, State state, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
this(entry.snapshotId, entry.includeGlobalState, state, entry.indices, entry.startTime, shards);
this(entry.snapshotId, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards);
}
public Entry(Entry entry, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
@ -121,6 +123,10 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
return includeGlobalState;
}
public boolean partial() {
return partial;
}
public long startTime() {
return startTime;
}
@ -133,6 +139,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
Entry entry = (Entry) o;
if (includeGlobalState != entry.includeGlobalState) return false;
if (partial != entry.partial) return false;
if (startTime != entry.startTime) return false;
if (!indices.equals(entry.indices)) return false;
if (!shards.equals(entry.shards)) return false;
@ -148,6 +155,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
int result = state.hashCode();
result = 31 * result + snapshotId.hashCode();
result = 31 * result + (includeGlobalState ? 1 : 0);
result = 31 * result + (partial ? 1 : 0);
result = 31 * result + shards.hashCode();
result = 31 * result + indices.hashCode();
result = 31 * result + waitingIndices.hashCode();
@ -360,6 +368,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
for (int i = 0; i < entries.length; i++) {
SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
boolean includeGlobalState = in.readBoolean();
boolean partial = in.readBoolean();
State state = State.fromValue(in.readByte());
int indices = in.readVInt();
List<String> indexBuilder = new ArrayList<>();
@ -375,7 +384,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
State shardState = State.fromValue(in.readByte());
builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
}
entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
entries[i] = new Entry(snapshotId, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
}
return new SnapshotsInProgress(entries);
}
@ -386,6 +395,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
for (Entry entry : entries) {
entry.snapshotId().writeTo(out);
out.writeBoolean(entry.includeGlobalState());
out.writeBoolean(entry.partial());
out.writeByte(entry.state().value());
out.writeVInt(entry.indices().size());
for (String index : entry.indices()) {
@ -406,6 +416,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots");
static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state");
static final XContentBuilderString PARTIAL = new XContentBuilderString("partial");
static final XContentBuilderString STATE = new XContentBuilderString("state");
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis");
@ -431,6 +442,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository());
builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot());
builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState());
builder.field(Fields.PARTIAL, entry.partial());
builder.field(Fields.STATE, entry.state());
builder.startArray(Fields.INDICES);
{


@ -81,13 +81,13 @@ public class NodeIndexDeletedAction extends AbstractComponent {
transportService.sendRequest(clusterState.nodes().masterNode(),
INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
if (nodes.localNode().isDataNode() == false) {
logger.trace("[{}] not acking store deletion (not a data node)");
logger.trace("[{}] not acking store deletion (not a data node)", index);
return;
}
threadPool.generic().execute(new AbstractRunnable() {
@Override
public void onFailure(Throwable t) {
logger.warn("[{}]failed to ack index store deleted for index", t, index);
logger.warn("[{}] failed to ack index store deleted for index", t, index);
}
@Override


@ -151,7 +151,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onNewClusterState(ClusterState state) {
if (logger.isTraceEnabled()) {
logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", shardRoutingEntry.getShardRouting().shardId(), state.prettyPrint(), shardRoutingEntry);
logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", state.prettyPrint(), shardRoutingEntry);
}
sendShardAction(actionName, observer, shardRoutingEntry, listener);
}
@ -321,7 +321,7 @@ public class ShardStateAction extends AbstractComponent {
if (numberOfUnassignedShards > 0) {
String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards);
if (logger.isTraceEnabled()) {
logger.trace(reason + ", scheduling a reroute");
logger.trace("{}, scheduling a reroute", reason);
}
routingService.reroute(reason);
}


@ -686,7 +686,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
}
private boolean isEmptyOrTrivialWildcard(List<String> expressions) {
return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0)));
return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0))));
}
private List<String> resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) {


@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;
@ -188,7 +189,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
boolean indexCreated = false;
Index createdIndex = null;
String removalReason = null;
try {
validate(request, currentState);
@ -308,10 +309,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// Set up everything, now locally create the index to see that things are ok, and apply
final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
// create the index here (on the master) to validate it can be created, as well as adding the mapping
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
indexCreated = true;
final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
createdIndex = indexService.index();
// now add the mappings
IndexService indexService = indicesService.indexServiceSafe(request.index());
MapperService mapperService = indexService.mapperService();
// first, add the default mapping
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
@ -415,9 +415,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
removalReason = "cleaning up after validating index on master";
return updatedState;
} finally {
if (indexCreated) {
if (createdIndex != null) {
// Index was already partially created - need to clean up
indicesService.removeIndex(request.index(), removalReason != null ? removalReason : "failed to create index");
indicesService.removeIndex(createdIndex, removalReason != null ? removalReason : "failed to create index");
}
}
}


@ -34,11 +34,12 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Arrays;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@ -67,7 +68,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
}
public void deleteIndices(final Request request, final Listener userListener) {
Collection<String> indices = Arrays.asList(request.indices);
Set<String> indices = Sets.newHashSet(request.indices);
final DeleteIndexListener listener = new DeleteIndexListener(userListener);
clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {
@ -84,6 +85,9 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
@Override
public ClusterState execute(final ClusterState currentState) {
// Check if index deletion conflicts with any running snapshots
SnapshotsService.checkIndexDeletion(currentState, indices);
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());


@ -31,6 +31,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
@ -74,7 +75,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
@Override
public ClusterState execute(final ClusterState currentState) {
List<String> indicesToClose = new ArrayList<>();
List<Index> indicesToClose = new ArrayList<>();
Map<String, IndexService> indices = new HashMap<>();
try {
for (AliasAction aliasAction : request.actions()) {
@ -112,7 +113,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());
continue;
}
indicesToClose.add(indexMetaData.getIndex().getName());
indicesToClose.add(indexMetaData.getIndex());
}
indices.put(indexMetaData.getIndex().getName(), indexService);
}
@ -153,7 +154,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
}
return currentState;
} finally {
for (String index : indicesToClose) {
for (Index index : indicesToClose) {
indicesService.removeIndex(index, "created for alias processing");
}
}


@ -19,14 +19,12 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@ -39,8 +37,9 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.RestoreService;
import org.elasticsearch.snapshots.SnapshotsService;
import java.util.ArrayList;
import java.util.Arrays;
@ -99,27 +98,10 @@ public class MetaDataIndexStateService extends AbstractComponent {
return currentState;
}
// Check if any of the indices to be closed are currently being restored from a snapshot and fail closing if such an index
// is found as closing an index that is being restored makes the index unusable (it cannot be recovered).
RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE);
if (restore != null) {
Set<String> indicesToFail = null;
for (RestoreInProgress.Entry entry : restore.entries()) {
for (ObjectObjectCursor<ShardId, RestoreInProgress.ShardRestoreStatus> shard : entry.shards()) {
if (!shard.value.state().completed()) {
if (indicesToClose.contains(shard.key.getIndexName())) {
if (indicesToFail == null) {
indicesToFail = new HashSet<>();
}
indicesToFail.add(shard.key.getIndexName());
}
}
}
}
if (indicesToFail != null) {
throw new IllegalArgumentException("Cannot close indices that are being restored: " + indicesToFail);
}
}
// Check if index closing conflicts with any running restores
RestoreService.checkIndexClosing(currentState, indicesToClose);
// Check if index closing conflicts with any running snapshots
SnapshotsService.checkIndexClosing(currentState, indicesToClose);
logger.info("closing indices [{}]", indicesAsString);


@ -36,6 +36,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
@ -112,13 +113,13 @@ public class MetaDataMappingService extends AbstractComponent {
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
String index = entry.getKey();
IndexMetaData indexMetaData = mdBuilder.get(index);
IndexMetaData indexMetaData = mdBuilder.get(entry.getKey());
if (indexMetaData == null) {
// index got deleted on us, ignore...
logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index);
logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey());
continue;
}
final Index index = indexMetaData.getIndex();
// the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
// the latest (based on order) update mapping one per node
List<RefreshTask> allIndexTasks = entry.getValue();
@ -127,7 +128,7 @@ public class MetaDataMappingService extends AbstractComponent {
if (indexMetaData.isSameUUID(task.indexUUID)) {
hasTaskWithRightUUID = true;
} else {
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task);
}
}
if (hasTaskWithRightUUID == false) {
@ -136,7 +137,7 @@ public class MetaDataMappingService extends AbstractComponent {
// construct the actual index if needed, and make sure the relevant mappings are there
boolean removeIndex = false;
IndexService indexService = indicesService.indexService(index);
IndexService indexService = indicesService.indexService(indexMetaData.getIndex());
if (indexService == null) {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
@ -208,47 +209,57 @@ public class MetaDataMappingService extends AbstractComponent {
class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
@Override
public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
Set<String> indicesToClose = new HashSet<>();
public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState,
List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
Set<Index> indicesToClose = new HashSet<>();
BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
try {
// precreate incoming indices;
for (PutMappingClusterStateUpdateRequest request : tasks) {
// failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
for (String index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData != null && indicesService.hasIndex(index) == false) {
// if we don't have the index, we will throw exceptions later;
indicesToClose.add(index);
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
final List<Index> indices = new ArrayList<>(request.indices().length);
try {
for (String index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData != null) {
if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
// if the index does not exist we create it once, add all types to the mapper service and
// close it later once we are done with mapping update
indicesToClose.add(indexMetaData.getIndex());
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData,
Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
}
indices.add(indexMetaData.getIndex());
} else {
// we didn't find the index in the clusterstate - maybe it was deleted
// NOTE: this doesn't fail the entire batch, only the current PutMapping request we are processing
throw new IndexNotFoundException(index);
}
}
}
}
for (PutMappingClusterStateUpdateRequest request : tasks) {
try {
currentState = applyRequest(currentState, request);
currentState = applyRequest(currentState, request, indices);
builder.success(request);
} catch (Throwable t) {
builder.failure(request, t);
}
}
return builder.build(currentState);
} finally {
for (String index : indicesToClose) {
for (Index index : indicesToClose) {
indicesService.removeIndex(index, "created for mapping processing");
}
}
}
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
List<Index> indices) throws IOException {
String mappingType = request.type();
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
for (String index : request.indices()) {
final MetaData metaData = currentState.metaData();
for (Index index : indices) {
IndexService indexService = indicesService.indexServiceSafe(index);
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
@ -270,7 +281,7 @@ public class MetaDataMappingService extends AbstractComponent {
// and a put mapping api call, so we don't know which type did exist before.
// Also the order of the mappings may be backwards.
if (newMapper.parentFieldMapper().active()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
IndexMetaData indexMetaData = metaData.index(index);
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
@ -290,11 +301,11 @@ public class MetaDataMappingService extends AbstractComponent {
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
MetaData.Builder builder = MetaData.builder(currentState.metaData());
for (String index : request.indices()) {
MetaData.Builder builder = MetaData.builder(metaData);
for (Index index : indices) {
// do the actual merge here on the master, and update the mapping source
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
if (indexService == null) { // TODO this seems impossible given we use indexServiceSafe above
continue;
}
@ -326,7 +337,7 @@ public class MetaDataMappingService extends AbstractComponent {
}
}
IndexMetaData indexMetaData = currentState.metaData().index(index);
IndexMetaData indexMetaData = metaData.index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}

View File

@ -19,24 +19,40 @@
package org.elasticsearch.cluster.node;
import org.elasticsearch.Version;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.CopyOnWriteArrayList;
/**
*/
public class DiscoveryNodeService extends AbstractComponent {
public static final Setting<Long> NODE_ID_SEED_SETTING =
// don't use node.id.seed so it won't be seen as an attribute
Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<>();
private final Version version;
@Inject
public DiscoveryNodeService(Settings settings) {
public DiscoveryNodeService(Settings settings, Version version) {
super(settings);
this.version = version;
}
public static String generateNodeId(Settings settings) {
Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
return Strings.randomBase64UUID(random);
}
public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) {
@ -44,7 +60,7 @@ public class DiscoveryNodeService extends AbstractComponent {
return this;
}
public Map<String, String> buildAttributes() {
public DiscoveryNode buildLocalNode(TransportAddress publishAddress) {
Map<String, String> attributes = new HashMap<>(settings.getByPrefix("node.").getAsMap());
attributes.remove("name"); // name is extracted in other places
if (attributes.containsKey("client")) {
@ -76,10 +92,11 @@ public class DiscoveryNodeService extends AbstractComponent {
}
}
return attributes;
final String nodeId = generateNodeId(settings);
return new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, attributes, version);
}
public static interface CustomAttributesProvider {
public interface CustomAttributesProvider {
Map<String, String> buildAttributes();
}
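A usage sketch of the new buildLocalNode entry point; the helper class and method below are hypothetical, and only buildLocalNode(TransportAddress) plus the setLocalNode(...) setter shown later in this diff are taken from the change itself.
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.transport.TransportAddress;
// Sketch: node id generation and attribute collection now live inside buildLocalNode(...),
// so bootstrap code can build the local node once the publish address is known and hand it
// to the cluster service instead of the cluster service assembling it during doStart().
final class LocalNodeWiringSketch {
    static DiscoveryNode wireLocalNode(DiscoveryNodeService discoveryNodeService,
                                       InternalClusterService clusterService,
                                       TransportAddress publishAddress) {
        DiscoveryNode localNode = discoveryNodeService.buildLocalNode(publishAddress);
        clusterService.setLocalNode(localNode);
        return localNode;
    }
}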

View File

@ -597,6 +597,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
/**
* Returns the number of routing nodes
*/
public int size() {
return nodesToShards.size();
}
public static final class UnassignedShards implements Iterable<ShardRouting> {
private final RoutingNodes nodes;

View File

@ -20,7 +20,6 @@
package org.elasticsearch.cluster.routing.allocation;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
@ -36,13 +35,13 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
import java.util.ArrayList;
import java.util.Collections;
@ -63,14 +62,17 @@ import java.util.stream.Collectors;
public class AllocationService extends AbstractComponent {
private final AllocationDeciders allocationDeciders;
private final GatewayAllocator gatewayAllocator;
private final ShardsAllocator shardsAllocator;
private final ClusterInfoService clusterInfoService;
private final ShardsAllocators shardsAllocators;
@Inject
public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) {
public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
super(settings);
this.allocationDeciders = allocationDeciders;
this.shardsAllocators = shardsAllocators;
this.gatewayAllocator = gatewayAllocator;
this.shardsAllocator = shardsAllocator;
this.clusterInfoService = clusterInfoService;
}
@ -92,7 +94,7 @@ public class AllocationService extends AbstractComponent {
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
shardsAllocators.applyStartedShards(allocation);
gatewayAllocator.applyStartedShards(allocation);
if (withReroute) {
reroute(allocation);
}
@ -192,7 +194,7 @@ public class AllocationService extends AbstractComponent {
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
shardsAllocators.applyFailedShards(allocation);
gatewayAllocator.applyFailedShards(allocation);
reroute(allocation);
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
@ -306,14 +308,10 @@ public class AllocationService extends AbstractComponent {
if (allocation.routingNodes().unassigned().size() > 0) {
updateLeftDelayOfUnassignedShards(allocation, settings);
changed |= shardsAllocators.allocateUnassigned(allocation);
changed |= gatewayAllocator.allocateUnassigned(allocation);
}
// move shards that no longer can be allocated
changed |= shardsAllocators.moveShards(allocation);
// rebalance
changed |= shardsAllocators.rebalance(allocation);
changed |= shardsAllocator.allocate(allocation);
assert RoutingNodes.assertShardStats(allocation.routingNodes());
return changed;
}
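A hedged sketch of how the reworked constructor might be wired up; only the collaborator types and the BalancedShardsAllocator(Settings) constructor appear in this diff, and the wrapper class below is an assumption for illustration.
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
// Sketch: the gateway allocator and the single ShardsAllocator are now injected directly,
// replacing the ShardsAllocators wrapper that is deleted further down in this diff.
final class AllocationServiceWiringSketch {
    static AllocationService build(Settings settings, AllocationDeciders deciders,
                                   GatewayAllocator gatewayAllocator, ClusterInfoService clusterInfoService) {
        return new AllocationService(settings, deciders, gatewayAllocator,
                new BalancedShardsAllocator(settings), clusterInfoService);
    }
}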

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -28,9 +27,7 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
@ -42,18 +39,14 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.PriorityComparator;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@ -103,27 +96,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
@Override
public void applyStartedShards(StartedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
@Override
public void applyFailedShards(FailedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
@Override
public boolean allocateUnassigned(RoutingAllocation allocation) {
public boolean allocate(RoutingAllocation allocation) {
if (allocation.routingNodes().size() == 0) {
/* with no nodes this is pointless */
return false;
}
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.allocateUnassigned();
}
@Override
public boolean rebalance(RoutingAllocation allocation) {
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.balance();
}
@Override
public boolean moveShards(RoutingAllocation allocation) {
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.moveShards();
boolean changed = balancer.allocateUnassigned();
changed |= balancer.moveShards();
changed |= balancer.balance();
return changed;
}
/**
@ -203,8 +185,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
private float weight(Balancer balancer, ModelNode node, String index, int numAdditionalShards) {
final float weightShard = (node.numShards() + numAdditionalShards - balancer.avgShardsPerNode());
final float weightIndex = (node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index));
final float weightShard = node.numShards() + numAdditionalShards - balancer.avgShardsPerNode();
final float weightIndex = node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index);
return theta0 * weightShard + theta1 * weightIndex;
}
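// Worked example (hypothetical numbers, not taken from this diff) of the weight formula above.
// Assuming the usual derivation theta0 = shardBalance / (shardBalance + indexBalance) and
// theta1 = indexBalance / (shardBalance + indexBalance), with shardBalance = 0.45 and indexBalance = 0.55:
//   node has 7 shards total, 3 of this index; cluster averages are 5.0 and 2.0 respectively
//   weightShard = 7 - 5.0 = 2.0,   weightIndex = 3 - 2.0 = 1.0
//   weight      = 0.45 * 2.0 + 0.55 * 1.0 ≈ 1.45
// A positive weight marks the node as "heavier" than average for this index, so the balancer
// prefers moving shards of this index away from it rather than onto it.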
@ -216,7 +198,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
public static class Balancer {
private final ESLogger logger;
private final Map<String, ModelNode> nodes = new HashMap<>();
private final HashSet<String> indices = new HashSet<>();
private final RoutingAllocation allocation;
private final RoutingNodes routingNodes;
private final WeightFunction weight;
@ -225,19 +206,15 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
private final MetaData metaData;
private final float avgShardsPerNode;
private final Predicate<ShardRouting> assignedFilter = shard -> shard.assignedToNode();
public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
this.logger = logger;
this.allocation = allocation;
this.weight = weight;
this.threshold = threshold;
this.routingNodes = allocation.routingNodes();
for (RoutingNode node : routingNodes) {
nodes.put(node.nodeId(), new ModelNode(node.nodeId()));
}
metaData = routingNodes.metaData();
avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / nodes.size();
avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / routingNodes.size();
buildModelFromAssigned();
}
/**
@ -271,17 +248,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return new NodeSorter(nodesArray(), weight, this);
}
private boolean initialize(RoutingNodes routing, RoutingNodes.UnassignedShards unassigned) {
if (logger.isTraceEnabled()) {
logger.trace("Start distributing Shards");
}
for (ObjectCursor<String> index : allocation.routingTable().indicesRouting().keys()) {
indices.add(index.value);
}
buildModelFromAssigned(routing.shards(assignedFilter));
return allocateUnassigned(unassigned);
}
private static float absDelta(float lower, float higher) {
assert higher >= lower : higher + " lt " + lower +" but was expected to be gte";
return Math.abs(higher - lower);
@ -295,12 +261,36 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
/**
* Allocates all possible unassigned shards
* Balances the nodes on the cluster model according to the weight function.
* The actual balancing is delegated to {@link #balanceByWeights()}
*
* @return <code>true</code> if the current configuration has been
* changed, otherwise <code>false</code>
*/
final boolean allocateUnassigned() {
return balance(true);
private boolean balance() {
if (logger.isTraceEnabled()) {
logger.trace("Start balancing cluster");
}
if (allocation.hasPendingAsyncFetch()) {
/*
* see https://github.com/elastic/elasticsearch/issues/14387
* if we allow rebalance operations while we are still fetching shard store data
* we might end up with unnecessary rebalance operations which can be super confusing/frustrating
* since once the fetches come back we might just move all the shards back again.
* Therefore we only do a rebalance if we have fetched all information.
*/
logger.debug("skipping rebalance due to in-flight shard/store fetches");
return false;
}
if (allocation.deciders().canRebalance(allocation).type() != Type.YES) {
logger.trace("skipping rebalance as it is disabled");
return false;
}
if (nodes.size() < 2) { /* skip if we only have one node */
logger.trace("skipping rebalance as single node only");
return false;
}
return balanceByWeights();
}
/**
@ -317,120 +307,100 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* @return <code>true</code> if the current configuration has been
* changed, otherwise <code>false</code>
*/
public boolean balance() {
return balance(false);
}
private boolean balanceByWeights() {
boolean changed = false;
final NodeSorter sorter = newNodeSorter();
final AllocationDeciders deciders = allocation.deciders();
final ModelNode[] modelNodes = sorter.modelNodes;
final float[] weights = sorter.weights;
for (String index : buildWeightOrderedIndices(sorter)) {
IndexMetaData indexMetaData = metaData.index(index);
private boolean balance(boolean onlyAssign) {
if (this.nodes.isEmpty()) {
/* with no nodes this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
if (onlyAssign) {
logger.trace("Start balancing cluster");
} else {
logger.trace("Start assigning unassigned shards");
// find nodes that have a shard of this index or where shards of this index are allowed to stay
// move these nodes to the front of modelNodes so that we can only balance based on these nodes
int relevantNodes = 0;
for (int i = 0; i < modelNodes.length; i++) {
ModelNode modelNode = modelNodes[i];
if (modelNode.getIndex(index) != null
|| deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(), allocation).type() != Type.NO) {
// swap nodes at position i and relevantNodes
modelNodes[i] = modelNodes[relevantNodes];
modelNodes[relevantNodes] = modelNode;
relevantNodes++;
}
}
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (onlyAssign == false && changed == false && allocation.deciders().canRebalance(allocation).type() == Type.YES) {
NodeSorter sorter = newNodeSorter();
if (nodes.size() > 1) { /* skip if we only have one node */
AllocationDeciders deciders = allocation.deciders();
final ModelNode[] modelNodes = sorter.modelNodes;
final float[] weights = sorter.weights;
for (String index : buildWeightOrderedIndices(sorter)) {
IndexMetaData indexMetaData = metaData.index(index);
// find nodes that have a shard of this index or where shards of this index are allowed to stay
// move these nodes to the front of modelNodes so that we can only balance based on these nodes
int relevantNodes = 0;
for (int i = 0; i < modelNodes.length; i++) {
ModelNode modelNode = modelNodes[i];
if (modelNode.getIndex(index) != null
|| deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(routingNodes), allocation).type() != Type.NO) {
// swap nodes at position i and relevantNodes
modelNodes[i] = modelNodes[relevantNodes];
modelNodes[relevantNodes] = modelNode;
relevantNodes++;
if (relevantNodes < 2) {
continue;
}
sorter.reset(index, 0, relevantNodes);
int lowIdx = 0;
int highIdx = relevantNodes - 1;
while (true) {
final ModelNode minNode = modelNodes[lowIdx];
final ModelNode maxNode = modelNodes[highIdx];
advance_range:
if (maxNode.numShards(index) > 0) {
final float delta = absDelta(weights[lowIdx], weights[highIdx]);
if (lessThan(delta, threshold)) {
if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta?
&& (absDelta(weights[0], weights[highIdx-1]) > threshold) // check if we need to break at all
) {
/* This is a special case if allocations from the "heaviest" to the "lighter" nodes are not possible
* due to some allocation decider restrictions like zone awareness. If one zone has, for instance,
* fewer nodes than another zone, one zone is horribly overloaded from a balanced perspective but we
* can't move the shards to the "lighter" nodes since otherwise the zone would go over capacity.
*
* This break jumps straight to the condition below where we start moving from the high index towards
* the low index to shrink the window we are considering for balance from the other direction.
* (check shrinking the window from MAX to MIN)
* See #3580
*/
break advance_range;
}
if (logger.isTraceEnabled()) {
logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]",
index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
break;
}
if (relevantNodes < 2) {
if (logger.isTraceEnabled()) {
logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]",
maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
/* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.
* a relocation must bring us closer to the balance; if we only achieve the same delta the relocation is useless */
if (tryRelocateShard(minNode, maxNode, index, delta)) {
/*
* TODO we could be a bit smarter here, we don't need to fully sort necessarily
* we could just find the place to insert linearly but the win might be minor
* compared to the added complexity
*/
weights[lowIdx] = sorter.weight(modelNodes[lowIdx]);
weights[highIdx] = sorter.weight(modelNodes[highIdx]);
sorter.sort(0, relevantNodes);
lowIdx = 0;
highIdx = relevantNodes - 1;
changed = true;
continue;
}
sorter.reset(index, 0, relevantNodes);
int lowIdx = 0;
int highIdx = relevantNodes - 1;
while (true) {
final ModelNode minNode = modelNodes[lowIdx];
final ModelNode maxNode = modelNodes[highIdx];
advance_range:
if (maxNode.numShards(index) > 0) {
final float delta = absDelta(weights[lowIdx], weights[highIdx]);
if (lessThan(delta, threshold)) {
if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta?
&& (absDelta(weights[0], weights[highIdx-1]) > threshold) // check if we need to break at all
) {
/* This is a special case if allocations from the "heaviest" to the "lighter" nodes are not possible
* due to some allocation decider restrictions like zone awareness. If one zone has, for instance,
* fewer nodes than another zone, one zone is horribly overloaded from a balanced perspective but we
* can't move the shards to the "lighter" nodes since otherwise the zone would go over capacity.
*
* This break jumps straight to the condition below where we start moving from the high index towards
* the low index to shrink the window we are considering for balance from the other direction.
* (check shrinking the window from MAX to MIN)
* See #3580
*/
break advance_range;
}
if (logger.isTraceEnabled()) {
logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]",
index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
break;
}
if (logger.isTraceEnabled()) {
logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]",
maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
/* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.
* a relocation must bring us closer to the balance; if we only achieve the same delta the relocation is useless */
if (tryRelocateShard(minNode, maxNode, index, delta)) {
/*
* TODO we could be a bit smarter here, we don't need to fully sort necessarily
* we could just find the place to insert linearly but the win might be minor
* compared to the added complexity
*/
weights[lowIdx] = sorter.weight(modelNodes[lowIdx]);
weights[highIdx] = sorter.weight(modelNodes[highIdx]);
sorter.sort(0, relevantNodes);
lowIdx = 0;
highIdx = relevantNodes - 1;
changed = true;
continue;
}
}
if (lowIdx < highIdx - 1) {
/* Shrinking the window from MIN to MAX
* we can't move any shard from the min node, let's move on to the next node
* and see if the threshold still holds. We either don't have any shard of this
* index on this node or allocation deciders prevent any relocation.*/
lowIdx++;
} else if (lowIdx > 0) {
/* Shrinking the window from MAX to MIN
* now we go max to min since obviously we can't move anything to the max node
* lets pick the next highest */
lowIdx = 0;
highIdx--;
} else {
/* we are done here, we either can't relocate anymore or we are balanced */
break;
}
}
}
if (lowIdx < highIdx - 1) {
/* Shrinking the window from MIN to MAX
* we can't move any shard from the min node, let's move on to the next node
* and see if the threshold still holds. We either don't have any shard of this
* index on this node or allocation deciders prevent any relocation.*/
lowIdx++;
} else if (lowIdx > 0) {
/* Shrinking the window from MAX to MIN
* now we go max to min since obviously we can't move anything to the max node
* lets pick the next highest */
lowIdx = 0;
highIdx--;
} else {
/* we are done here, we either can't relocate anymore or we are balanced */
break;
}
}
}
@ -451,7 +421,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* to the nodes we relocated them from.
*/
private String[] buildWeightOrderedIndices(NodeSorter sorter) {
final String[] indices = this.indices.toArray(new String[this.indices.size()]);
final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class);
final float[] deltas = new float[indices.length];
for (int i = 0; i < deltas.length; i++) {
sorter.reset(indices[i]);
@ -503,20 +473,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
public boolean moveShards() {
if (nodes.isEmpty()) {
/* with no nodes this is pointless */
return false;
}
// Create a copy of the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
// Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
// shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
// offloading the shards.
List<ShardRouting> shards = new ArrayList<>();
boolean changed = false;
int index = 0;
boolean found = true;
final NodeSorter sorter = newNodeSorter();
while (found) {
found = false;
for (RoutingNode routingNode : routingNodes) {
for (RoutingNode routingNode : allocation.routingNodes()) {
if (index >= routingNode.size()) {
continue;
}
@ -524,64 +490,52 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
ShardRouting shardRouting = routingNode.get(index);
// we can only move started shards...
if (shardRouting.started()) {
shards.add(shardRouting);
final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
assert sourceNode != null && sourceNode.containsShard(shardRouting);
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
changed |= moveShard(sorter, shardRouting, sourceNode, routingNode);
}
}
}
index++;
}
if (shards.isEmpty()) {
return false;
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (changed == false) {
final NodeSorter sorter = newNodeSorter();
final ModelNode[] modelNodes = sorter.modelNodes;
for (ShardRouting shardRouting : shards) {
final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
assert sourceNode != null && sourceNode.containsShard(shardRouting);
final RoutingNode routingNode = sourceNode.getRoutingNode(routingNodes);
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
sorter.reset(shardRouting.getIndexName());
/*
* the sorter holds the minimum weight node first for the shard's index.
* We now walk through the nodes until we find a node to allocate the shard.
* This is not guaranteed to be balanced after this operation, but we still try best effort to
* allocate on the minimal eligible node.
*/
boolean moved = false;
for (ModelNode currentNode : modelNodes) {
if (currentNode == sourceNode) {
continue;
}
RoutingNode target = currentNode.getRoutingNode(routingNodes);
Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation);
if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
Decision sourceDecision = sourceNode.removeShard(shardRouting);
ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
// re-add (now relocating shard) to source node
sourceNode.addShard(shardRouting, sourceDecision);
Decision targetDecision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
currentNode.addShard(targetRelocatingShard, targetDecision);
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
}
moved = true;
changed = true;
break;
}
}
if (moved == false) {
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
return changed;
}
/**
* Move started shard to the minimal eligible node with respect to the weight function
*
* @return <code>true</code> if the shard was moved successfully, otherwise <code>false</code>
*/
private boolean moveShard(NodeSorter sorter, ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
sorter.reset(shardRouting.getIndexName());
/*
* the sorter holds the minimum weight node first for the shard's index.
* We now walk through the nodes until we find a node to allocate the shard.
* This is not guaranteed to be balanced after this operation, but we still try best effort to
* allocate on the minimal eligible node.
*/
for (ModelNode currentNode : sorter.modelNodes) {
if (currentNode != sourceNode) {
RoutingNode target = currentNode.getRoutingNode();
Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation);
if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shardRouting);
ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
currentNode.addShard(targetRelocatingShard);
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
}
return true;
}
}
}
return changed;
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
return false;
}
/**
@ -593,18 +547,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* on the target node which we respect during the allocation / balancing
* process. In short, this method recreates the status-quo in the cluster.
*/
private void buildModelFromAssigned(Iterable<ShardRouting> shards) {
for (ShardRouting shard : shards) {
assert shard.assignedToNode();
/* we skip relocating shards here since we expect an initializing shard with the same id coming in */
if (shard.state() == RELOCATING) {
continue;
}
ModelNode node = nodes.get(shard.currentNodeId());
assert node != null;
node.addShard(shard, Decision.single(Type.YES, "Already allocated on node", node.getNodeId()));
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
private void buildModelFromAssigned() {
for (RoutingNode rn : routingNodes) {
ModelNode node = new ModelNode(rn);
nodes.put(rn.nodeId(), node);
for (ShardRouting shard : rn) {
assert rn.nodeId().equals(shard.currentNodeId());
/* we skip relocating shards here since we expect an initializing shard with the same id coming in */
if (shard.state() != RELOCATING) {
node.addShard(shard);
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
}
}
}
}
}
@ -612,8 +567,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
/**
* Allocates all given shards on the minimal eligible node for the shards index
* with respect to the weight function. All given shards must be unassigned.
* @return <code>true</code> if the current configuration has been
* changed, otherwise <code>false</code>
*/
private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned) {
private boolean allocateUnassigned() {
RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
assert !nodes.isEmpty();
if (logger.isTraceEnabled()) {
logger.trace("Start allocating unassigned shards");
@ -657,7 +615,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
int secondaryLength = 0;
int primaryLength = primary.length;
ArrayUtil.timSort(primary, comparator);
final Set<ModelNode> throttledNodes = Collections.newSetFromMap(new IdentityHashMap<ModelNode, Boolean>());
final Set<ModelNode> throttledNodes = Collections.newSetFromMap(new IdentityHashMap<>());
do {
for (int i = 0; i < primaryLength; i++) {
ShardRouting shard = primary[i];
@ -695,7 +653,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* don't check deciders
*/
if (currentWeight <= minWeight) {
Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(routingNodes), allocation);
Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(), allocation);
NOUPDATE:
if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
if (currentWeight == minWeight) {
@ -736,7 +694,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
assert decision != null && minNode != null || decision == null && minNode == null;
if (minNode != null) {
minNode.addShard(shard, decision);
minNode.addShard(shard);
if (decision.type() == Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
@ -745,7 +703,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
changed = true;
continue; // don't add to ignoreUnassigned
} else {
final RoutingNode node = minNode.getRoutingNode(routingNodes);
final RoutingNode node = minNode.getRoutingNode();
if (deciders.canAllocate(node, allocation).type() != Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decision.type());
@ -791,10 +749,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
ShardRouting candidate = null;
final AllocationDeciders deciders = allocation.deciders();
for (ShardRouting shard : index.getAllShards()) {
for (ShardRouting shard : index) {
if (shard.started()) {
// skip initializing, unassigned and relocating shards we can't relocate them anyway
Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(routingNodes), allocation);
Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation);
Decision rebalanceDecision = deciders.canRebalance(shard, allocation);
if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE))
&& ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) {
@ -815,24 +773,17 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
if (candidate != null) {
/* allocate on the model even if not throttled */
maxNode.removeShard(candidate);
minNode.addShard(candidate, decision);
minNode.addShard(candidate);
if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
if (logger.isTraceEnabled()) {
logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
minNode.getNodeId());
}
/* now allocate on the cluster - if we are started we need to relocate the shard */
if (candidate.started()) {
routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
} else {
routingNodes.initialize(candidate, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
/* now allocate on the cluster */
routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
return true;
}
}
}
@ -846,14 +797,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
static class ModelNode implements Iterable<ModelIndex> {
private final String id;
private final Map<String, ModelIndex> indices = new HashMap<>();
private int numShards = 0;
// lazily calculated
private RoutingNode routingNode;
private final RoutingNode routingNode;
public ModelNode(String id) {
this.id = id;
public ModelNode(RoutingNode routingNode) {
this.routingNode = routingNode;
}
public ModelIndex getIndex(String indexId) {
@ -861,13 +810,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
public String getNodeId() {
return id;
return routingNode.nodeId();
}
public RoutingNode getRoutingNode(RoutingNodes routingNodes) {
if (routingNode == null) {
routingNode = routingNodes.node(id);
}
public RoutingNode getRoutingNode() {
return routingNode;
}
@ -888,33 +834,31 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return -1;
}
public void addShard(ShardRouting shard, Decision decision) {
public void addShard(ShardRouting shard) {
ModelIndex index = indices.get(shard.getIndexName());
if (index == null) {
index = new ModelIndex(shard.getIndexName());
indices.put(index.getIndexId(), index);
}
index.addShard(shard, decision);
index.addShard(shard);
numShards++;
}
public Decision removeShard(ShardRouting shard) {
public void removeShard(ShardRouting shard) {
ModelIndex index = indices.get(shard.getIndexName());
Decision removed = null;
if (index != null) {
removed = index.removeShard(shard);
if (removed != null && index.numShards() == 0) {
index.removeShard(shard);
if (index.numShards() == 0) {
indices.remove(shard.getIndexName());
}
}
numShards--;
return removed;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Node(").append(id).append(")");
sb.append("Node(").append(routingNode.nodeId()).append(")");
return sb.toString();
}
@ -930,9 +874,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
static final class ModelIndex {
static final class ModelIndex implements Iterable<ShardRouting> {
private final String id;
private final Map<ShardRouting, Decision> shards = new HashMap<>();
private final Set<ShardRouting> shards = new HashSet<>(4); // expect few shards of same index to be allocated on same node
private int highestPrimary = -1;
public ModelIndex(String id) {
@ -942,7 +886,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
public int highestPrimary() {
if (highestPrimary == -1) {
int maxId = -1;
for (ShardRouting shard : shards.keySet()) {
for (ShardRouting shard : shards) {
if (shard.primary()) {
maxId = Math.max(maxId, shard.id());
}
@ -960,24 +904,25 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return shards.size();
}
public Collection<ShardRouting> getAllShards() {
return shards.keySet();
@Override
public Iterator<ShardRouting> iterator() {
return shards.iterator();
}
public Decision removeShard(ShardRouting shard) {
public void removeShard(ShardRouting shard) {
highestPrimary = -1;
return shards.remove(shard);
assert shards.contains(shard) : "Shard not allocated on current node: " + shard;
shards.remove(shard);
}
public void addShard(ShardRouting shard, Decision decision) {
public void addShard(ShardRouting shard) {
highestPrimary = -1;
assert decision != null;
assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard;
shards.put(shard, decision);
assert !shards.contains(shard) : "Shard already allocated on current node: " + shard;
shards.add(shard);
}
public boolean containsShard(ShardRouting shard) {
return shards.containsKey(shard);
return shards.contains(shard);
}
}

View File

@ -19,56 +19,25 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
/**
* <p>
* A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster.
* The allocator makes basic decisions about where a shard instance will be allocated and whether already allocated instances
* need relocate to other nodes due to node failures or due to rebalancing decisions.
* need to relocate to other nodes due to node failures or due to rebalancing decisions.
* </p>
*/
public interface ShardsAllocator {
/**
* Applies changes on started nodes based on the implemented algorithm. For example if a
* shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
* this allocator might apply some cleanups on the node that used to hold the shard.
* @param allocation all started {@link ShardRouting shards}
*/
void applyStartedShards(StartedRerouteAllocation allocation);
/**
* Applies changes on failed nodes based on the implemented algorithm.
* @param allocation all failed {@link ShardRouting shards}
*/
void applyFailedShards(FailedRerouteAllocation allocation);
/**
* Assign all unassigned shards to nodes
* Allocates shards to nodes in the cluster. An implementation of this method should:
* - assign unassigned shards
* - relocate shards that cannot stay on a node anymore
* - relocate shards to find a good shard balance in the cluster
*
* @param allocation current node allocation
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
boolean allocateUnassigned(RoutingAllocation allocation);
/**
* Rebalancing number of shards on all nodes
*
* @param allocation current node allocation
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
boolean rebalance(RoutingAllocation allocation);
/**
* Move started shards that can not be allocated to a node anymore
*
* @param allocation current node allocation
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
boolean moveShards(RoutingAllocation allocation);
boolean allocate(RoutingAllocation allocation);
}
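For reference, a minimal hypothetical implementation of the consolidated contract (not part of this change); it only illustrates the three responsibilities the Javadoc above lists, the way BalancedShardsAllocator.allocate(...) now sequences them.
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
// Hypothetical allocator showing the shape of the single allocate(...) entry point.
public class NoOpShardsAllocator implements ShardsAllocator {
    @Override
    public boolean allocate(RoutingAllocation allocation) {
        boolean changed = false;
        // 1. assign unassigned shards                        (no-op in this sketch)
        // 2. move shards that can no longer remain on a node  (no-op in this sketch)
        // 3. rebalance shards for a better distribution       (no-op in this sketch)
        return changed; // must be true only if the routing table actually changed
    }
}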

View File

@ -1,100 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
/**
* The {@link ShardsAllocator} class offers methods for allocating shard within a cluster.
* These methods include moving shards and re-balancing the cluster. It also allows management
* of shards by their state.
*/
public class ShardsAllocators extends AbstractComponent implements ShardsAllocator {
private final GatewayAllocator gatewayAllocator;
private final ShardsAllocator allocator;
public ShardsAllocators(GatewayAllocator allocator) {
this(Settings.Builder.EMPTY_SETTINGS, allocator);
}
public ShardsAllocators(Settings settings, GatewayAllocator allocator) {
this(settings, allocator, new BalancedShardsAllocator(settings));
}
@Inject
public ShardsAllocators(Settings settings, GatewayAllocator gatewayAllocator, ShardsAllocator allocator) {
super(settings);
this.gatewayAllocator = gatewayAllocator;
this.allocator = allocator;
}
@Override
public void applyStartedShards(StartedRerouteAllocation allocation) {
gatewayAllocator.applyStartedShards(allocation);
allocator.applyStartedShards(allocation);
}
@Override
public void applyFailedShards(FailedRerouteAllocation allocation) {
gatewayAllocator.applyFailedShards(allocation);
allocator.applyFailedShards(allocation);
}
@Override
public boolean allocateUnassigned(RoutingAllocation allocation) {
boolean changed = false;
changed |= gatewayAllocator.allocateUnassigned(allocation);
changed |= allocator.allocateUnassigned(allocation);
return changed;
}
protected long nanoTime() {
return System.nanoTime();
}
@Override
public boolean rebalance(RoutingAllocation allocation) {
if (allocation.hasPendingAsyncFetch() == false) {
/*
* see https://github.com/elastic/elasticsearch/issues/14387
* if we allow rebalance operations while we are still fetching shard store data
* we might end up with unnecessary rebalance operations which can be super confusion/frustrating
* since once the fetches come back we might just move all the shards back again.
* Therefore we only do a rebalance if we have fetched all information.
*/
return allocator.rebalance(allocation);
} else {
logger.debug("skipping rebalance due to in-flight shard/store fetches");
return false;
}
}
@Override
public boolean moveShards(RoutingAllocation allocation) {
return allocator.moveShards(allocation);
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.service;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
@ -32,19 +31,18 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.TimeoutClusterStateListener;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -54,7 +52,6 @@ import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.CountDown;
@ -65,9 +62,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Collection;
@ -78,8 +73,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.Random;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
@ -97,25 +90,15 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
public static final Setting<Long> NODE_ID_SEED_SETTING =
// don't use node.id.seed so it won't be seen as an attribute
Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
private final ThreadPool threadPool;
private BiConsumer<ClusterChangedEvent, Discovery.AckListener> clusterStatePublisher;
private final OperationRouting operationRouting;
private final TransportService transportService;
private final ClusterSettings clusterSettings;
private final DiscoveryNodeService discoveryNodeService;
private final Version version;
private final TimeValue reconnectInterval;
private TimeValue slowTaskLoggingThreshold;
@ -140,47 +123,49 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private final ClusterBlocks.Builder initialBlocks;
private final TaskManager taskManager;
private volatile ScheduledFuture reconnectToNodes;
private NodeConnectionsService nodeConnectionsService;
@Inject
public InternalClusterService(Settings settings, OperationRouting operationRouting, TransportService transportService,
ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) {
public InternalClusterService(Settings settings, OperationRouting operationRouting,
ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName) {
super(settings);
this.operationRouting = operationRouting;
this.transportService = transportService;
this.threadPool = threadPool;
this.clusterSettings = clusterSettings;
this.discoveryNodeService = discoveryNodeService;
this.version = version;
// will be replaced on doStart.
this.clusterState = ClusterState.builder(clusterName).build();
this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold);
this.reconnectInterval = CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING.get(settings);
this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings);
localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
initialBlocks = ClusterBlocks.builder();
taskManager = transportService.getTaskManager();
}
private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) {
this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
}
public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
synchronized public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
clusterStatePublisher = publisher;
}
synchronized public void setLocalNode(DiscoveryNode localNode) {
assert clusterState.nodes().localNodeId() == null : "local node is already set";
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
}
synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
assert this.nodeConnectionsService == null : "nodeConnectionsService is already set";
this.nodeConnectionsService = nodeConnectionsService;
}
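Connection management is being moved out of this class into a dedicated NodeConnectionsService, which is injected here before start and invoked from clusterChanged() further down in this diff. Going only by the call sites visible in this change (connectToAddedNodes and disconnectFromRemovedNodes taking the ClusterChangedEvent, plus the reconnect-interval setting re-registered under NodeConnectionsService), its responsibility can be sketched roughly as below; this is an inference for illustration, not the actual class.

// Rough sketch inferred from the call sites in this diff; not the real implementation.
class NodeConnectionsServiceSketch {
    void connectToAddedNodes(ClusterChangedEvent event) {
        for (DiscoveryNode node : event.nodesDelta().addedNodes()) {
            // open a transport connection; failures are left to fault detection,
            // as the inline loop removed from clusterChanged() used to note
        }
    }

    void disconnectFromRemovedNodes(ClusterChangedEvent event) {
        for (DiscoveryNode node : event.nodesDelta().removedNodes()) {
            // drop the transport connection for nodes that left the cluster
        }
    }
}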
@Override
public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
if (lifecycle.started()) {
throw new IllegalStateException("can't set initial block when started");
}
@ -188,12 +173,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
@Override
public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
synchronized public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
removeInitialStateBlock(block.id());
}
@Override
public void removeInitialStateBlock(int blockId) throws IllegalStateException {
synchronized public void removeInitialStateBlock(int blockId) throws IllegalStateException {
if (lifecycle.started()) {
throw new IllegalStateException("can't set initial block when started");
}
@ -201,26 +186,18 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
@Override
protected void doStart() {
synchronized protected void doStart() {
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
Objects.requireNonNull(clusterState.nodes().localNode(), "please set the local node before starting");
Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
add(localNodeMasterListeners);
add(taskManager);
this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext());
this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
// note, we rely on the fact that it's a new id each time we start, see FD and "kill -9" handling
final String nodeId = generateNodeId(settings);
final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version);
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
this.transportService.setLocalNode(localNode);
this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
}
@Override
protected void doStop() {
FutureUtils.cancel(this.reconnectToNodes);
synchronized protected void doStop() {
for (NotifyTimeout onGoingTimeout : onGoingTimeouts) {
onGoingTimeout.cancel();
onGoingTimeout.listener.onClose();
@ -230,7 +207,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
@Override
protected void doClose() {
synchronized protected void doClose() {
}
@Override
@ -400,11 +377,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
return updateTasksExecutor.getMaxTaskWaitTime();
}
@Override
public TaskManager getTaskManager() {
return taskManager;
}
/** asserts that the current thread is the cluster state update thread */
public boolean assertClusterStateThread() {
assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread";
@ -457,18 +429,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
return;
}
ClusterStateTaskExecutor.BatchResult<T> batchResult;
long startTimeNS = System.nanoTime();
long startTimeNS = currentTimeInNanos();
try {
List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
batchResult = executor.execute(previousClusterState, inputs);
} catch (Throwable e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(previousClusterState.nodes().prettyPrint());
sb.append(previousClusterState.routingTable().prettyPrint());
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
logger.trace(sb.toString(), e);
logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, previousClusterState.version(), source,
previousClusterState.nodes().prettyPrint(), previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
}
warnAboutSlowTaskIfNeeded(executionTime, source);
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
@ -509,8 +478,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took [{}] no change in cluster_state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source);
return;
}
@ -551,9 +520,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
sb.append(newClusterState.prettyPrint());
logger.trace(sb.toString());
logger.trace("cluster state updated, source [{}]\n{}", source, newClusterState.prettyPrint());
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
}
@ -568,15 +535,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}
// TODO, do this in parallel (and wait)
for (DiscoveryNode node : nodesDelta.addedNodes()) {
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
nodeConnectionsService.connectToAddedNodes(clusterChangedEvent);
// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails
@ -612,13 +571,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}
for (DiscoveryNode node : nodesDelta.removedNodes()) {
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
}
}
nodeConnectionsService.disconnectFromRemovedNodes(clusterChangedEvent);
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
@ -649,21 +602,22 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took [{}] done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.getRoutingNodes().prettyPrint());
logger.warn(sb.toString(), t);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}{}{}", t, executionTime,
newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.nodes().prettyPrint(),
newClusterState.routingTable().prettyPrint(), newClusterState.getRoutingNodes().prettyPrint());
// TODO: do we want to call updateTask.onFailure here?
}
}
// this one is overridden in tests so we can control time
protected long currentTimeInNanos() {return System.nanoTime();}
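Pulling the clock behind an overridable method is what lets tests drive these timings deterministically. A minimal sketch of the pattern, using made-up class names rather than the actual Elasticsearch test code:

// Illustration of the overridable-clock pattern; names are hypothetical.
class TimedRunner {
    protected long currentTimeInNanos() {
        return System.nanoTime();   // production reads the real clock
    }

    long runAndMeasureNanos(Runnable task) {
        long start = currentTimeInNanos();
        task.run();
        return currentTimeInNanos() - start;
    }
}

class FixedClockRunner extends TimedRunner {
    long fakeNanos;                 // a test advances this field to simulate elapsed time

    @Override
    protected long currentTimeInNanos() {
        return fakeNanos;
    }
}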
private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
if (listener instanceof AckedClusterStateTaskListener) {
return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
@ -777,7 +731,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) {
if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) {
logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold);
logger.warn("cluster state update task [{}] took [{}] above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold);
}
}
@ -809,64 +763,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}
private class ReconnectToNodes implements Runnable {
private ConcurrentMap<DiscoveryNode, Integer> failureCount = ConcurrentCollections.newConcurrentMap();
@Override
public void run() {
// the master node will check whether all nodes are alive with certain discovery implementations,
// but we can't rely on that, so we check on it as well
for (DiscoveryNode node : clusterState.nodes()) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
if (!transportService.nodeConnected(node)) {
try {
transportService.connectToNode(node);
} catch (Exception e) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (clusterState.nodes().nodeExists(node.id())) { // double check here as well, maybe it's gone?
Integer nodeFailureCount = failureCount.get(node);
if (nodeFailureCount == null) {
nodeFailureCount = 1;
} else {
nodeFailureCount = nodeFailureCount + 1;
}
// log every 6th failure
if ((nodeFailureCount % 6) == 0) {
// reset the failure count...
nodeFailureCount = 0;
logger.warn("failed to reconnect to node {}", e, node);
}
failureCount.put(node, nodeFailureCount);
}
}
}
}
}
// go over and remove failed nodes that have been removed
DiscoveryNodes nodes = clusterState.nodes();
for (Iterator<DiscoveryNode> failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) {
DiscoveryNode failedNode = failedNodesIt.next();
if (!nodes.nodeExists(failedNode.id())) {
failedNodesIt.remove();
}
}
if (lifecycle.started()) {
reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
}
}
}
public static String generateNodeId(Settings settings) {
Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
return Strings.randomBase64UUID(random);
}
private static class LocalNodeMasterListeners implements ClusterStateListener {
private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<>();

View File

@ -0,0 +1,33 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to suppress logger-usage check errors inside a whole class or a method.
*/
@Retention(RetentionPolicy.CLASS)
@Target({ ElementType.CONSTRUCTOR, ElementType.METHOD, ElementType.TYPE })
public @interface SuppressLoggerChecks {
String reason();
}
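For illustration, a method that intentionally forwards a caller-built message could opt out of the new check like this; the wrapper class and the forwarded call are hypothetical, only the annotation and its mandatory reason() come from the code above.

import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.logging.ESLogger;

class LegacyLoggingBridge {
    @SuppressLoggerChecks(reason = "message is prebuilt by the caller and forwarded verbatim")
    void forward(ESLogger logger, String prebuiltMessage) {
        logger.info(prebuiltMessage); // would normally be flagged by the logger-usage check
    }
}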

View File

@ -93,7 +93,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
final String message = "[" + this.name + "] Data too large, data for [" +
fieldName + "] would be larger than limit of [" +
memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
logger.debug(message);
logger.debug("{}", message);
throw new CircuitBreakingException(message,
bytesNeeded, this.memoryBytesLimit);
}

View File

@ -81,7 +81,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
this.trippedCount.incrementAndGet();
final String message = "Data too large, data for field [" + fieldName + "] would be larger than limit of [" +
memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
logger.debug(message);
logger.debug("{}", message);
throw new CircuitBreakingException(message);
}

View File

@ -314,7 +314,7 @@ public class PolygonBuilder extends ShapeBuilder {
double shiftOffset = any.coordinate.x > DATELINE ? DATELINE : (any.coordinate.x < -DATELINE ? -DATELINE : 0);
if (debugEnabled()) {
LOGGER.debug("shift: {[]}", shiftOffset);
LOGGER.debug("shift: [{}]", shiftOffset);
}
// run along the border of the component, collect the
@ -392,9 +392,9 @@ public class PolygonBuilder extends ShapeBuilder {
if(debugEnabled()) {
for (int i = 0; i < result.length; i++) {
LOGGER.debug("Component {[]}:", i);
LOGGER.debug("Component [{}]:", i);
for (int j = 0; j < result[i].length; j++) {
LOGGER.debug("\t" + Arrays.toString(result[i][j]));
LOGGER.debug("\t{}", Arrays.toString(result[i][j]));
}
}
}
@ -444,7 +444,7 @@ public class PolygonBuilder extends ShapeBuilder {
// is an arbitrary point of the hole. The polygon edge next to this point
// is part of the polygon the hole belongs to.
if (debugEnabled()) {
LOGGER.debug("Holes: " + Arrays.toString(holes));
LOGGER.debug("Holes: {}", Arrays.toString(holes));
}
for (int i = 0; i < numHoles; i++) {
final Edge current = new Edge(holes[i].coordinate, holes[i].next);
@ -464,9 +464,9 @@ public class PolygonBuilder extends ShapeBuilder {
final int component = -edges[index].component - numHoles - 1;
if(debugEnabled()) {
LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]);
LOGGER.debug("\tComponent: " + component);
LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges));
LOGGER.debug("\tposition ({}) of edge {}: {}", index, current, edges[index]);
LOGGER.debug("\tComponent: {}", component);
LOGGER.debug("\tHole intersections ({}): {}", current.coordinate.x, Arrays.toString(edges));
}
components.get(component).add(points[i]);

View File

@ -37,6 +37,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.ingest.IngestStats;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
@ -552,6 +553,14 @@ public abstract class StreamInput extends InputStream {
}
}
public <T extends Writeable> T readOptionalWritable(T prototype) throws IOException {
if (readBoolean()) {
return (T) prototype.readFrom(this);
} else {
return null;
}
}
public <T extends Throwable> T readThrowable() throws IOException {
if (readBoolean()) {
int key = readVInt();

View File

@ -520,6 +520,15 @@ public abstract class StreamOutput extends OutputStream {
}
}
public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException {
if (writeable != null) {
writeBoolean(true);
writeable.writeTo(this);
} else {
writeBoolean(false);
}
}
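Paired with readOptionalWritable in StreamInput earlier in this diff, this gives a null-safe, boolean-prefixed encoding for Writeable values. A small sketch of the intended round trip; Foo stands in for any Writeable and is hypothetical, only the two optional helpers added here are assumed:

// Foo is a placeholder Writeable type used only for illustration.
void writeMaybe(StreamOutput out, Foo maybeNull) throws IOException {
    out.writeOptionalWriteable(maybeNull);     // writes false, or true followed by the value
}

Foo readMaybe(StreamInput in, Foo prototype) throws IOException {
    return in.readOptionalWritable(prototype); // returns null, or prototype.readFrom(in)
}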
public void writeThrowable(Throwable throwable) throws IOException {
if (throwable == null) {
writeBoolean(false);

View File

@ -19,6 +19,8 @@
package org.elasticsearch.common.logging;
import org.elasticsearch.common.SuppressLoggerChecks;
/**
* A logger that logs deprecation notices.
*/
@ -45,6 +47,7 @@ public class DeprecationLogger {
/**
* Logs a deprecated message.
*/
@SuppressLoggerChecks(reason = "safely delegates to logger")
public void deprecated(String msg, Object... params) {
logger.debug(msg, params);
}
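A brief usage sketch; it assumes the usual construction from a parent ESLogger, and the setting name is invented for the example:

private final ESLogger logger = Loggers.getLogger(getClass());
private final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); // assumed constructor

void warnOnLegacySetting(String value) {
    deprecationLogger.deprecated("setting [my.old.setting] is deprecated, found value [{}]", value);
}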

View File

@ -111,7 +111,7 @@ public class Lucene {
try {
return Version.parse(version);
} catch (ParseException e) {
logger.warn("no version match {}, default to {}", version, defaultVersion, e);
logger.warn("no version match {}, default to {}", e, version, defaultVersion);
return defaultVersion;
}
}
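As the corrected call suggests, the internal ESLogger takes the Throwable immediately after the format string, followed by one argument per '{}' placeholder; the new logger-usage check catches exactly this kind of mismatch. A minimal sketch mirroring the call above (parseVersion is a hypothetical stand-in):

try {
    parseVersion(version);                                        // hypothetical call
} catch (ParseException e) {
    // exception first, then the placeholder arguments
    logger.warn("no version match {}, default to {}", e, version, defaultVersion);
}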
@ -235,11 +235,7 @@ public class Lucene {
@Override
protected Object doBody(String segmentFileName) throws IOException {
try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
final int format = input.readInt();
if (format == CodecUtil.CODEC_MAGIC) {
CodecUtil.checksumEntireFile(input);
}
// legacy....
CodecUtil.checksumEntireFile(input);
}
return null;
}

View File

@ -31,14 +31,14 @@ import java.net.SocketException;
import java.util.List;
import java.util.Locale;
/**
/**
* Simple class to log {@code ifconfig}-style output at DEBUG level.
*/
final class IfConfig {
private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
private static final String INDENT = " ";
/** log interface configuration at debug level, if it's enabled */
static void logIfNecessary() {
if (logger.isDebugEnabled()) {
@ -49,7 +49,7 @@ final class IfConfig {
}
}
}
/** perform actual logging: might throw exception if things go wrong */
private static void doLogging() throws IOException {
StringBuilder msg = new StringBuilder();
@ -59,14 +59,14 @@ final class IfConfig {
// ordinary name
msg.append(nic.getName());
msg.append(System.lineSeparator());
// display name (e.g. on windows)
if (!nic.getName().equals(nic.getDisplayName())) {
msg.append(INDENT);
msg.append(nic.getDisplayName());
msg.append(System.lineSeparator());
}
// addresses: v4 first, then v6
List<InterfaceAddress> addresses = nic.getInterfaceAddresses();
for (InterfaceAddress address : addresses) {
@ -76,7 +76,7 @@ final class IfConfig {
msg.append(System.lineSeparator());
}
}
for (InterfaceAddress address : addresses) {
if (address.getAddress() instanceof Inet6Address) {
msg.append(INDENT);
@ -84,7 +84,7 @@ final class IfConfig {
msg.append(System.lineSeparator());
}
}
// hardware address
byte hardware[] = nic.getHardwareAddress();
if (hardware != null) {
@ -98,19 +98,19 @@ final class IfConfig {
}
msg.append(System.lineSeparator());
}
// attributes
msg.append(INDENT);
msg.append(formatFlags(nic));
msg.append(System.lineSeparator());
}
logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString());
logger.debug("configuration:{}{}", System.lineSeparator(), msg);
}
/** format internet address: java's default doesn't include everything useful */
private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException {
StringBuilder sb = new StringBuilder();
InetAddress address = interfaceAddress.getAddress();
if (address instanceof Inet6Address) {
sb.append("inet6 ");
@ -122,10 +122,10 @@ final class IfConfig {
sb.append(NetworkAddress.formatAddress(address));
int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength());
sb.append(" netmask:" + NetworkAddress.formatAddress(InetAddress.getByAddress(new byte[] {
(byte)(netmask >>> 24),
(byte)(netmask >>> 16 & 0xFF),
(byte)(netmask >>> 8 & 0xFF),
(byte)(netmask & 0xFF)
(byte)(netmask >>> 24),
(byte)(netmask >>> 16 & 0xFF),
(byte)(netmask >>> 8 & 0xFF),
(byte)(netmask & 0xFF)
})));
InetAddress broadcast = interfaceAddress.getBroadcast();
if (broadcast != null) {
@ -141,7 +141,7 @@ final class IfConfig {
}
return sb.toString();
}
/** format network interface flags */
private static String formatFlags(NetworkInterface nic) throws SocketException {
StringBuilder flags = new StringBuilder();

View File

@ -29,8 +29,10 @@ import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
@ -259,7 +261,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
@ -326,7 +328,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
InternalClusterService.NODE_ID_SEED_SETTING,
DiscoveryNodeService.NODE_ID_SEED_SETTING,
DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,

View File

@ -20,7 +20,10 @@
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.lease.Releasable;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
@ -29,9 +32,8 @@ import java.util.concurrent.locks.ReentrantLock;
* created the first time they are acquired and removed if no thread holds the
* lock. The latter is important to assure that the list of locks does not grow
* infinitely.
*
* A Thread can acquire a lock only once.
*
*
*
* */
public class KeyedLock<T> {
@ -50,48 +52,38 @@ public class KeyedLock<T> {
private final ConcurrentMap<T, KeyLock> map = ConcurrentCollections.newConcurrentMap();
protected final ThreadLocal<KeyLock> threadLocal = new ThreadLocal<>();
public void acquire(T key) {
public Releasable acquire(T key) {
assert isHeldByCurrentThread(key) == false : "lock for " + key + " is already held by this thread";
while (true) {
if (threadLocal.get() != null) {
// if we are here, the thread already has the lock
throw new IllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId()
+ " for key " + key);
}
KeyLock perNodeLock = map.get(key);
if (perNodeLock == null) {
KeyLock newLock = new KeyLock(fair);
perNodeLock = map.putIfAbsent(key, newLock);
if (perNodeLock == null) {
newLock.lock();
threadLocal.set(newLock);
return;
return new ReleasableLock(key, newLock);
}
}
assert perNodeLock != null;
int i = perNodeLock.count.get();
if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) {
perNodeLock.lock();
threadLocal.set(perNodeLock);
return;
return new ReleasableLock(key, perNodeLock);
}
}
}
public void release(T key) {
KeyLock lock = threadLocal.get();
public boolean isHeldByCurrentThread(T key) {
KeyLock lock = map.get(key);
if (lock == null) {
throw new IllegalStateException("Lock not acquired");
return false;
}
release(key, lock);
return lock.isHeldByCurrentThread();
}
void release(T key, KeyLock lock) {
assert lock.isHeldByCurrentThread();
assert lock == map.get(key);
lock.unlock();
threadLocal.set(null);
int decrementAndGet = lock.count.decrementAndGet();
if (decrementAndGet == 0) {
map.remove(key, lock);
@ -99,6 +91,24 @@ public class KeyedLock<T> {
}
private final class ReleasableLock implements Releasable {
final T key;
final KeyLock lock;
final AtomicBoolean closed = new AtomicBoolean();
private ReleasableLock(T key, KeyLock lock) {
this.key = key;
this.lock = lock;
}
@Override
public void close() {
if (closed.compareAndSet(false, true)) {
release(key, lock);
}
}
}
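Since acquire(key) now returns a Releasable (which is closeable), callers can scope the per-key lock with try-with-resources instead of pairing acquire and release by hand; a sketch, with the lock instance passed in for brevity:

void withKeyLock(KeyedLock<String> locks, String key, Runnable action) {
    // the lock for this key is created on first use and removed again once no thread holds it
    try (Releasable ignored = locks.acquire(key)) {
        action.run();
    }
}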
@SuppressWarnings("serial")
private final static class KeyLock extends ReentrantLock {
KeyLock(boolean fair) {

View File

@ -823,7 +823,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return null;
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("full ping responses:");
StringBuilder sb = new StringBuilder();
if (fullPingResponses.length == 0) {
sb.append(" {none}");
} else {
@ -831,7 +831,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
sb.append("\n\t--> ").append(pingResponse);
}
}
logger.trace(sb.toString());
logger.trace("full ping responses:{}", sb);
}
// filter responses
@ -848,7 +848,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
if (logger.isDebugEnabled()) {
StringBuilder sb = new StringBuilder("filtered ping responses: (filter_client[").append(masterElectionFilterClientNodes).append("], filter_data[").append(masterElectionFilterDataNodes).append("])");
StringBuilder sb = new StringBuilder();
if (pingResponses.isEmpty()) {
sb.append(" {none}");
} else {
@ -856,7 +856,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
sb.append("\n\t--> ").append(pingResponse);
}
}
logger.debug(sb.toString());
logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes,
masterElectionFilterDataNodes, sb);
}
final DiscoveryNode localNode = clusterService.localNode();
@ -918,7 +919,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// *** called from within an cluster state update task *** //
assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME);
logger.warn(reason + ", current nodes: {}", clusterState.nodes());
logger.warn("{}, current nodes: {}", reason, clusterState.nodes());
nodesFD.stop();
masterFD.stop(reason);

View File

@ -250,7 +250,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
// We do some I/O in here, so skip this if DEBUG/INFO are not enabled:
if (logger.isDebugEnabled()) {
// Log one line per path.data:
StringBuilder sb = new StringBuilder("node data locations details:");
StringBuilder sb = new StringBuilder();
for (NodePath nodePath : nodePaths) {
sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());
@ -278,7 +278,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
.append(fsPath.getType())
.append(']');
}
logger.debug(sb.toString());
logger.debug("node data locations details:{}", sb);
} else if (logger.isInfoEnabled()) {
FsInfo.Path totFSPath = new FsInfo.Path();
Set<String> allTypes = new HashSet<>();
@ -306,14 +306,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
}
// Just log a 1-line summary:
logger.info(String.format(Locale.ROOT,
"using [%d] data paths, mounts [%s], net usable_space [%s], net total_space [%s], spins? [%s], types [%s]",
nodePaths.length,
allMounts,
totFSPath.getAvailable(),
totFSPath.getTotal(),
toString(allSpins),
toString(allTypes)));
logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? [{}], types [{}]",
nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes));
}
}

View File

@ -202,7 +202,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
try (DirectoryStream<Path> stream = Files.newDirectoryStream(stateLocation)) {
for (Path stateFile : stream) {
if (logger.isTraceEnabled()) {
logger.trace("[upgrade]: processing [" + stateFile.getFileName() + "]");
logger.trace("[upgrade]: processing [{}]", stateFile.getFileName());
}
final String name = stateFile.getFileName().toString();
if (name.startsWith("metadata-")) {

View File

@ -161,11 +161,14 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
if (state.nodes().masterNodeId() == null) {
logger.debug("not recovering from gateway, no master elected yet");
} else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
logger.debug("not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]",
nodes.masterAndDataNodes().size(), recoverAfterNodes);
} else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
logger.debug("not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]",
nodes.dataNodes().size(), recoverAfterDataNodes);
} else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
logger.debug("not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]",
nodes.masterNodes().size(), recoverAfterMasterNodes);
} else {
boolean enforceRecoverAfterTime;
String reason;

View File

@ -262,7 +262,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
// validate max content length
if (maxContentLength.bytes() > Integer.MAX_VALUE) {
logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength);
maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
}
this.maxContentLength = maxContentLength;

View File

@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit;
/**
*/
public final class IndexingSlowLog implements IndexingOperationListener {
private final Index index;
private boolean reformat;
private long indexWarnThreshold;
private long indexInfoThreshold;
@ -51,7 +52,6 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private SlowLogLevel level;
private final ESLogger indexLogger;
private final ESLogger deleteLogger;
private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog";
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
@ -75,16 +75,8 @@ public final class IndexingSlowLog implements IndexingOperationListener {
}, true, Setting.Scope.INDEX);
IndexingSlowLog(IndexSettings indexSettings) {
this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"),
Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".delete"));
}
/**
* Build with the specified loggers. Only used for testing.
*/
IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) {
this.indexLogger = indexLogger;
this.deleteLogger = deleteLogger;
this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings());
this.index = indexSettings.getIndex();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat);
this.reformat = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING);
@ -109,7 +101,6 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private void setLevel(SlowLogLevel level) {
this.level = level;
this.indexLogger.setLevel(level.name());
this.deleteLogger.setLevel(level.name());
}
private void setWarnThreshold(TimeValue warnThreshold) {
@ -141,13 +132,13 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private void postIndexing(ParsedDocument doc, long tookInNanos) {
if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) {
indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) {
indexLogger.info("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) {
indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) {
indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
}
}
@ -156,9 +147,11 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private final long tookInNanos;
private final boolean reformat;
private final int maxSourceCharsToLog;
private final Index index;
SlowLogParsedDocumentPrinter(ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) {
SlowLogParsedDocumentPrinter(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) {
this.doc = doc;
this.index = index;
this.tookInNanos = tookInNanos;
this.reformat = reformat;
this.maxSourceCharsToLog = maxSourceCharsToLog;
@ -167,6 +160,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(index).append(" ");
sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], ");
sb.append("type[").append(doc.type()).append("], ");
sb.append("id[").append(doc.id()).append("], ");

View File

@ -155,7 +155,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
// because analyzers are aliased, they might be closed several times
// an NPE is thrown in this case, so ignore....
} catch (Exception e) {
logger.debug("failed to close analyzer " + analyzer);
logger.debug("failed to close analyzer {}", analyzer);
}
}
}

View File

@ -129,9 +129,9 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
merge.rateLimiter.getMBPerSec());
if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it
logger.debug(message);
logger.debug("{}", message);
} else if (logger.isTraceEnabled()) {
logger.trace(message);
logger.trace("{}", message);
}
}
}

View File

@ -671,7 +671,7 @@ public abstract class Engine implements Closeable {
closeNoLock("engine failed on: [" + reason + "]");
} finally {
if (failedEngine != null) {
logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", reason, failure);
logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", failure, reason);
return;
}
logger.warn("failed engine [{}]", failure, reason);
@ -697,7 +697,7 @@ public abstract class Engine implements Closeable {
store.decRef();
}
} else {
logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason, failure);
logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", failure, reason);
}
}

View File

@ -20,10 +20,14 @@
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.Weight;
@ -122,11 +126,11 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
public static class Nested {
private final BitSetProducer rootFilter;
private final Weight innerFilter;
private final Query innerQuery;
public Nested(BitSetProducer rootFilter, Weight innerFilter) {
public Nested(BitSetProducer rootFilter, Query innerQuery) {
this.rootFilter = rootFilter;
this.innerFilter = innerFilter;
this.innerQuery = innerQuery;
}
/**
@ -140,7 +144,10 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
* Get a {@link DocIdSet} that matches the inner documents.
*/
public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
Scorer s = innerFilter.scorer(ctx);
final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx);
Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false);
Scorer s = weight.scorer(ctx);
return s == null ? null : s.iterator();
}
}

View File

@ -230,13 +230,13 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
IndexFieldData.Builder builder = null;
String format = type.getFormat(indexSettings.getSettings());
if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
logger.warn("field [" + fieldName + "] has no doc values, will use default field data format");
logger.warn("field [{}] has no doc values, will use default field data format", fieldName);
format = null;
}
if (format != null) {
builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
if (builder == null) {
logger.warn("failed to find format [" + format + "] for field [" + fieldName + "], will use default");
logger.warn("failed to find format [{}] for field [{}], will use default", format, fieldName);
}
}
if (builder == null && docValues) {

View File

@ -19,8 +19,14 @@
package org.elasticsearch.index.mapper;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.common.Strings;
@ -48,15 +54,8 @@ import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/** A parser for documents, given mappings from a DocumentMapper */
class DocumentParser implements Closeable {
final class DocumentParser implements Closeable {
private CloseableThreadLocal<ParseContext.InternalParseContext> cache = new CloseableThreadLocal<ParseContext.InternalParseContext>() {
@Override
@ -99,7 +98,7 @@ class DocumentParser implements Closeable {
reverseOrder(context);
ParsedDocument doc = parsedDocument(source, context, update(context, mapping));
ParsedDocument doc = parsedDocument(source, context, createDynamicUpdate(mapping, docMapper, context.getDynamicMappers()));
// reset the context to free up memory
context.reset(null, null, null);
return doc;
@ -116,10 +115,7 @@ class DocumentParser implements Closeable {
// entire type is disabled
parser.skipChildren();
} else if (emptyDoc == false) {
Mapper update = parseObject(context, mapping.root, true);
if (update != null) {
context.addDynamicMappingsUpdate(update);
}
parseObjectOrNested(context, mapping.root, true);
}
for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) {
@ -201,11 +197,6 @@ class DocumentParser implements Closeable {
}
private static Mapping update(ParseContext.InternalParseContext context, Mapping mapping) {
Mapper rootDynamicUpdate = context.dynamicMappingsUpdate();
return rootDynamicUpdate != null ? mapping.mappingUpdate(rootDynamicUpdate) : null;
}
private static MapperParsingException wrapInMapperParsingException(SourceToParse source, Throwable e) {
// if its already a mapper parsing exception, no need to wrap it...
if (e instanceof MapperParsingException) {
@ -220,10 +211,156 @@ class DocumentParser implements Closeable {
return new MapperParsingException("failed to parse", e);
}
static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException {
/** Creates a Mapping containing any dynamically added fields, or returns null if there were no dynamic mappings. */
static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, List<Mapper> dynamicMappers) {
if (dynamicMappers.isEmpty()) {
return null;
}
// We build a mapping by first sorting the mappers, so that all mappers containing a common prefix
// will be processed in a contiguous block. When the prefix is no longer seen, we pop the extra elements
// off the stack, merging them upwards into the existing mappers.
Collections.sort(dynamicMappers, (Mapper o1, Mapper o2) -> o1.name().compareTo(o2.name()));
Iterator<Mapper> dynamicMapperItr = dynamicMappers.iterator();
List<ObjectMapper> parentMappers = new ArrayList<>();
Mapper firstUpdate = dynamicMapperItr.next();
parentMappers.add(createUpdate(mapping.root(), firstUpdate.name().split("\\."), 0, firstUpdate));
Mapper previousMapper = null;
while (dynamicMapperItr.hasNext()) {
Mapper newMapper = dynamicMapperItr.next();
if (previousMapper != null && newMapper.name().equals(previousMapper.name())) {
// We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where
// foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical.
// Here we just skip over the duplicates, but we merge them to ensure there are no conflicts.
newMapper.merge(previousMapper, false);
continue;
}
previousMapper = newMapper;
String[] nameParts = newMapper.name().split("\\.");
// We first need the stack to only contain mappers in common with the previously processed mapper
// For example, if the first mapper processed was a.b.c, and we now have a.d, the stack will contain
// a.b, and we want to merge b back into the stack so it just contains a
int i = removeUncommonMappers(parentMappers, nameParts);
// Then we need to add back mappers that may already exist within the stack, but are not on it.
// For example, if we processed a.b, followed by an object mapper a.c.d, and now are adding a.c.d.e
// then the stack will only have a on it because we will have already merged a.c.d into the stack.
// So we need to pull a.c, followed by a.c.d, onto the stack so e can be added to the end.
i = expandCommonMappers(parentMappers, nameParts, i);
// If there are still parents of the new mapper which are not on the stack, we need to pull them
// from the existing mappings. In order to maintain the invariant that the stack only contains
// fields which are updated, we cannot simply add the existing mappers to the stack, since they
// may have other subfields which will not be updated. Instead, we pull the mapper from the existing
// mappings, and build an update with only the new mapper and its parents. This then becomes our
// "new mapper", and can be added to the stack.
if (i < nameParts.length - 1) {
newMapper = createExistingMapperUpdate(parentMappers, nameParts, i, docMapper, newMapper);
}
if (newMapper instanceof ObjectMapper) {
parentMappers.add((ObjectMapper)newMapper);
} else {
addToLastMapper(parentMappers, newMapper, true);
}
}
popMappers(parentMappers, 1, true);
assert parentMappers.size() == 1;
return mapping.mappingUpdate(parentMappers.get(0));
}
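The sort-then-stack idea in the comments above can be seen in isolation with plain strings: sorting the dotted names puts every field that shares a parent into one contiguous run, so a single pass with a stack of open parents is enough to rebuild the tree. The standalone sketch below uses made-up types purely to illustrate that grouping, not the real mapper merging:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.List;

public class DynamicUpdateGroupingSketch {
    public static void main(String[] args) {
        List<String> dynamicFields = new ArrayList<>(Arrays.asList("a.b.c", "a.d", "a.b.e", "x"));
        // sorting puts fields that share a parent ("a.b.c", "a.b.e") next to each other
        Collections.sort(dynamicFields);

        Deque<String> openParents = new ArrayDeque<>();
        for (String field : dynamicFields) {
            String[] parts = field.split("\\.");
            // pop parents that are no longer a prefix of the current field (cf. removeUncommonMappers)
            while (!openParents.isEmpty() && !isPrefix(openParents, parts)) {
                System.out.println("close " + openParents.removeLast());
            }
            // push any intermediate parents that are still missing (cf. expandCommonMappers/createUpdate)
            for (int i = openParents.size(); i < parts.length - 1; i++) {
                openParents.addLast(parts[i]);
                System.out.println("open  " + String.join(".", openParents));
            }
            System.out.println("field " + field);
        }
        while (!openParents.isEmpty()) {
            System.out.println("close " + openParents.removeLast());
        }
    }

    private static boolean isPrefix(Deque<String> openParents, String[] parts) {
        int i = 0;
        for (String parent : openParents) {
            if (i >= parts.length - 1 || parent.equals(parts[i]) == false) {
                return false;
            }
            i++;
        }
        return true;
    }
}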
private static void popMappers(List<ObjectMapper> parentMappers, int keepBefore, boolean merge) {
assert keepBefore >= 1; // never remove the root mapper
// pop off parent mappers not needed by the current mapper,
// merging them backwards since they are immutable
for (int i = parentMappers.size() - 1; i >= keepBefore; --i) {
addToLastMapper(parentMappers, parentMappers.remove(i), merge);
}
}
/**
* Adds a mapper as an update into the last mapper. If merge is true, the new mapper
* will be merged in with other child mappers of the last parent, otherwise it will be a new update.
*/
private static void addToLastMapper(List<ObjectMapper> parentMappers, Mapper mapper, boolean merge) {
assert parentMappers.size() >= 1;
int lastIndex = parentMappers.size() - 1;
ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper);
if (merge) {
withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper, false);
}
parentMappers.set(lastIndex, withNewMapper);
}
/**
* Removes mappers that exist on the stack, but are not part of the path of the current nameParts,
* Returns the next unprocessed index from nameParts.
*/
private static int removeUncommonMappers(List<ObjectMapper> parentMappers, String[] nameParts) {
int keepBefore = 1;
while (keepBefore < parentMappers.size() &&
parentMappers.get(keepBefore).simpleName().equals(nameParts[keepBefore - 1])) {
++keepBefore;
}
popMappers(parentMappers, keepBefore, true);
return keepBefore - 1;
}
/**
* Adds mappers from the end of the stack that exist as updates within those mappers.
* Returns the next unprocessed index from nameParts.
*/
private static int expandCommonMappers(List<ObjectMapper> parentMappers, String[] nameParts, int i) {
ObjectMapper last = parentMappers.get(parentMappers.size() - 1);
while (i < nameParts.length - 1 && last.getMapper(nameParts[i]) != null) {
Mapper newLast = last.getMapper(nameParts[i]);
assert newLast instanceof ObjectMapper;
last = (ObjectMapper) newLast;
parentMappers.add(last);
++i;
}
return i;
}
/** Creates an update for intermediate object mappers that are not on the stack, but parents of newMapper. */
private static ObjectMapper createExistingMapperUpdate(List<ObjectMapper> parentMappers, String[] nameParts, int i,
DocumentMapper docMapper, Mapper newMapper) {
String updateParentName = nameParts[i];
final ObjectMapper lastParent = parentMappers.get(parentMappers.size() - 1);
if (parentMappers.size() > 1) {
// only prefix with parent mapper if the parent mapper isn't the root (which has a fake name)
updateParentName = lastParent.name() + '.' + nameParts[i];
}
ObjectMapper updateParent = docMapper.objectMappers().get(updateParentName);
assert updateParent != null : updateParentName + " doesn't exist";
return createUpdate(updateParent, nameParts, i + 1, newMapper);
}
/** Build an update for the parent which will contain the given mapper and any intermediate fields. */
private static ObjectMapper createUpdate(ObjectMapper parent, String[] nameParts, int i, Mapper mapper) {
List<ObjectMapper> parentMappers = new ArrayList<>();
ObjectMapper previousIntermediate = parent;
for (; i < nameParts.length - 1; ++i) {
Mapper intermediate = previousIntermediate.getMapper(nameParts[i]);
assert intermediate != null : "Field " + previousIntermediate.name() + " does not have a subfield " + nameParts[i];
assert intermediate instanceof ObjectMapper;
parentMappers.add((ObjectMapper)intermediate);
previousIntermediate = (ObjectMapper)intermediate;
}
if (parentMappers.isEmpty() == false) {
// add the new mapper to the stack, and pop down to the original parent level
addToLastMapper(parentMappers, mapper, false);
popMappers(parentMappers, 1, false);
mapper = parentMappers.get(0);
}
return parent.mappingUpdate(mapper);
}
static void parseObjectOrNested(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException {
if (mapper.isEnabled() == false) {
context.parser().skipChildren();
return null;
return;
}
XContentParser parser = context.parser();
@ -234,7 +371,7 @@ class DocumentParser implements Closeable {
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.VALUE_NULL) {
// the object is null ("obj1" : null), simply bail
return null;
return;
}
if (token.isValue()) {
@ -256,21 +393,19 @@ class DocumentParser implements Closeable {
}
ObjectMapper update = null;
update = innerParseObject(context, mapper, parser, currentFieldName, token, update);
innerParseObject(context, mapper, parser, currentFieldName, token);
// restore the enable path flag
if (nested.isNested()) {
nested(context, nested);
}
return update;
}
private static ObjectMapper innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token, ObjectMapper update) throws IOException {
private static void innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token) throws IOException {
while (token != XContentParser.Token.END_OBJECT) {
ObjectMapper newUpdate = null;
if (token == XContentParser.Token.START_OBJECT) {
newUpdate = parseObject(context, mapper, currentFieldName);
parseObject(context, mapper, currentFieldName);
} else if (token == XContentParser.Token.START_ARRAY) {
newUpdate = parseArray(context, mapper, currentFieldName);
parseArray(context, mapper, currentFieldName);
} else if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NULL) {
@ -278,18 +413,10 @@ class DocumentParser implements Closeable {
} else if (token == null) {
throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?");
} else if (token.isValue()) {
newUpdate = parseValue(context, mapper, currentFieldName, token);
parseValue(context, mapper, currentFieldName, token);
}
token = parser.nextToken();
if (newUpdate != null) {
if (update == null) {
update = newUpdate;
} else {
update = update.merge(newUpdate, false);
}
}
}
return update;
}
private static void nested(ParseContext context, ObjectMapper.Nested nested) {
@ -335,33 +462,29 @@ class DocumentParser implements Closeable {
return context;
}
private static Mapper parseObjectOrField(ParseContext context, Mapper mapper) throws IOException {
private static void parseObjectOrField(ParseContext context, Mapper mapper) throws IOException {
if (mapper instanceof ObjectMapper) {
return parseObject(context, (ObjectMapper) mapper, false);
parseObjectOrNested(context, (ObjectMapper) mapper, false);
} else {
FieldMapper fieldMapper = (FieldMapper)mapper;
Mapper update = fieldMapper.parse(context);
if (update != null) {
context.addDynamicMapper(update);
}
if (fieldMapper.copyTo() != null) {
parseCopyFields(context, fieldMapper, fieldMapper.copyTo().copyToFields());
}
return update;
}
}
private static ObjectMapper parseObject(final ParseContext context, ObjectMapper mapper, String currentFieldName) throws IOException {
if (currentFieldName == null) {
throw new MapperParsingException("object mapping [" + mapper.name() + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]");
}
assert currentFieldName != null;
context.path().add(currentFieldName);
ObjectMapper update = null;
Mapper objectMapper = mapper.getMapper(currentFieldName);
if (objectMapper != null) {
final Mapper subUpdate = parseObjectOrField(context, objectMapper);
if (subUpdate != null) {
// propagate mapping update
update = mapper.mappingUpdate(subUpdate);
}
parseObjectOrField(context, objectMapper);
} else {
ObjectMapper.Dynamic dynamic = mapper.dynamic();
if (dynamic == null) {
@ -382,8 +505,9 @@ class DocumentParser implements Closeable {
}
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
objectMapper = builder.build(builderContext);
context.addDynamicMapper(objectMapper);
context.path().add(currentFieldName);
update = mapper.mappingUpdate(parseAndMergeUpdate(objectMapper, context));
parseObjectOrField(context, objectMapper);
} else {
// not dynamic, read everything up to end object
context.parser().skipChildren();
@ -394,7 +518,7 @@ class DocumentParser implements Closeable {
return update;
}
private static ObjectMapper parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException {
private static void parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException {
String arrayFieldName = lastFieldName;
Mapper mapper = parentMapper.getMapper(lastFieldName);
if (mapper != null) {
@ -402,15 +526,9 @@ class DocumentParser implements Closeable {
// expects an array, if so we pass the context straight to the mapper and if not
// we serialize the array components
if (mapper instanceof ArrayValueMapperParser) {
final Mapper subUpdate = parseObjectOrField(context, mapper);
if (subUpdate != null) {
// propagate the mapping update
return parentMapper.mappingUpdate(subUpdate);
} else {
return null;
}
parseObjectOrField(context, mapper);
} else {
return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
}
} else {
@ -423,31 +541,34 @@ class DocumentParser implements Closeable {
} else if (dynamic == ObjectMapper.Dynamic.TRUE) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object");
if (builder == null) {
return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
return;
}
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
mapper = builder.build(builderContext);
if (mapper != null && mapper instanceof ArrayValueMapperParser) {
assert mapper != null;
if (mapper instanceof ArrayValueMapperParser) {
context.addDynamicMapper(mapper);
context.path().add(arrayFieldName);
mapper = parseAndMergeUpdate(mapper, context);
return parentMapper.mappingUpdate(mapper);
parseObjectOrField(context, mapper);
} else {
return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
}
} else {
return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
// TODO: shouldn't this skip, not parse?
parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
}
}
}
private static ObjectMapper parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException {
private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException {
XContentParser parser = context.parser();
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.START_OBJECT) {
return parseObject(context, mapper, lastFieldName);
parseObject(context, mapper, lastFieldName);
} else if (token == XContentParser.Token.START_ARRAY) {
return parseArray(context, mapper, lastFieldName);
parseArray(context, mapper, lastFieldName);
} else if (token == XContentParser.Token.FIELD_NAME) {
lastFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NULL) {
@ -455,25 +576,20 @@ class DocumentParser implements Closeable {
} else if (token == null) {
throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?");
} else {
return parseValue(context, mapper, lastFieldName, token);
parseValue(context, mapper, lastFieldName, token);
}
}
return null;
}
private static ObjectMapper parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException {
private static void parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException {
if (currentFieldName == null) {
throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]");
}
Mapper mapper = parentMapper.getMapper(currentFieldName);
if (mapper != null) {
Mapper subUpdate = parseObjectOrField(context, mapper);
if (subUpdate == null) {
return null;
}
return parentMapper.mappingUpdate(subUpdate);
parseObjectOrField(context, mapper);
} else {
return parseDynamicValue(context, parentMapper, currentFieldName, token);
parseDynamicValue(context, parentMapper, currentFieldName, token);
}
}
@ -641,7 +757,7 @@ class DocumentParser implements Closeable {
throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]");
}
private static ObjectMapper parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException {
private static void parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException {
ObjectMapper.Dynamic dynamic = parentMapper.dynamic();
if (dynamic == null) {
dynamic = dynamicOrDefault(context.root().dynamic());
@ -650,7 +766,7 @@ class DocumentParser implements Closeable {
throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName);
}
if (dynamic == ObjectMapper.Dynamic.FALSE) {
return null;
return;
}
final String path = context.path().pathAsText(currentFieldName);
final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
@ -668,14 +784,9 @@ class DocumentParser implements Closeable {
// try to not introduce a conflict
mapper = mapper.updateFieldType(Collections.singletonMap(path, existingFieldType));
}
context.addDynamicMapper(mapper);
mapper = parseAndMergeUpdate(mapper, context);
ObjectMapper update = null;
if (mapper != null) {
update = parentMapper.mappingUpdate(mapper);
}
return update;
parseObjectOrField(context, mapper);
}
/** Creates instances of the fields that the current field should be copied to */
@ -713,8 +824,9 @@ class DocumentParser implements Closeable {
// The path of the dest field might be completely different from the current one so we need to reset it
context = context.overridePath(new ContentPath(0));
String[] paths = Strings.splitStringToArray(field, '.');
String fieldName = paths[paths.length-1];
// TODO: why Strings.splitStringToArray instead of String.split?
final String[] paths = Strings.splitStringToArray(field, '.');
final String fieldName = paths[paths.length-1];
ObjectMapper mapper = context.root();
ObjectMapper[] mappers = new ObjectMapper[paths.length-1];
if (paths.length > 1) {
@ -745,6 +857,7 @@ class DocumentParser implements Closeable {
if (mapper.nested() != ObjectMapper.Nested.NO) {
throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
}
context.addDynamicMapper(mapper);
break;
case FALSE:
// Maybe we should log something to tell the user that the copy_to is ignored in this case.
@ -759,36 +872,10 @@ class DocumentParser implements Closeable {
parent = mapper;
}
}
ObjectMapper update = parseDynamicValue(context, mapper, fieldName, context.parser().currentToken());
assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping
if (paths.length > 1) {
for (int i = paths.length - 2; i >= 0; i--) {
ObjectMapper parent = context.root();
if (i > 0) {
parent = mappers[i-1];
}
assert parent != null;
update = parent.mappingUpdate(update);
}
}
context.addDynamicMappingsUpdate(update);
parseDynamicValue(context, mapper, fieldName, context.parser().currentToken());
}
}
/**
* Parse the given {@code context} with the given {@code mapper} and apply
* the potential mapping update in-place. This method is useful when
* composing mapping updates.
*/
private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException {
final Mapper update = parseObjectOrField(context, mapper);
if (update != null) {
mapper = (M) mapper.merge(update, false);
}
return mapper;
}
private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper.Dynamic dynamic) {
return dynamic == null ? ObjectMapper.Dynamic.TRUE : dynamic;
}
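
The DocumentParser changes above all follow one pattern: parse methods stop returning ObjectMapper updates that had to be merged at every nesting level, and instead register each dynamically created mapper exactly once via context.addDynamicMapper(...). A consolidated mapping update can then be produced in a single pass after parsing. A minimal sketch of that consolidation step, where wrapInParents(...) is a hypothetical helper that rebuilds the chain of parent object mappers (roughly what the removed per-level mappingUpdate(...) calls did incrementally):

    // Sketch: folding the flat list of dynamic mappers back into one mapping update.
    Mapper update = null;
    for (Mapper newMapper : context.getDynamicMappers()) {
        // wrap the new mapper in its parent object mappers up to the root (hypothetical helper)
        Mapper rooted = wrapInParents(context.root(), newMapper);
        update = (update == null) ? rooted : update.merge(rooted, false);
    }
    // a null update means parsing introduced no dynamic mappings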

View File

@ -374,7 +374,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
// this can happen if this mapper represents a mapping update
return this;
} else if (fieldType.getClass() != newFieldType.getClass()) {
throw new IllegalStateException("Mixing up field types: " + fieldType.getClass() + " != " + newFieldType.getClass());
throw new IllegalStateException("Mixing up field types: " +
fieldType.getClass() + " != " + newFieldType.getClass() + " on field " + fieldType.name());
}
MultiFields updatedMultiFields = multiFields.updateFieldType(fullNameToFieldType);
if (fieldType == newFieldType && multiFields == updatedMultiFields) {

View File

@ -76,6 +76,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
return this.name;
}
/** Returns a newly built mapper. */
public abstract Y build(BuilderContext context);
}

View File

@ -331,13 +331,13 @@ public abstract class ParseContext {
}
@Override
public void addDynamicMappingsUpdate(Mapper update) {
in.addDynamicMappingsUpdate(update);
public void addDynamicMapper(Mapper update) {
in.addDynamicMapper(update);
}
@Override
public Mapper dynamicMappingsUpdate() {
return in.dynamicMappingsUpdate();
public List<Mapper> getDynamicMappers() {
return in.getDynamicMappers();
}
}
@ -369,7 +369,7 @@ public abstract class ParseContext {
private AllEntries allEntries = new AllEntries();
private Mapper dynamicMappingsUpdate = null;
private List<Mapper> dynamicMappers = new ArrayList<>();
public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, ContentPath path) {
this.indexSettings = indexSettings;
@ -394,7 +394,7 @@ public abstract class ParseContext {
this.source = source == null ? null : sourceToParse.source();
this.path.reset();
this.allEntries = new AllEntries();
this.dynamicMappingsUpdate = null;
this.dynamicMappers = new ArrayList<>();
}
@Override
@ -536,18 +536,13 @@ public abstract class ParseContext {
}
@Override
public void addDynamicMappingsUpdate(Mapper mapper) {
assert mapper instanceof RootObjectMapper : mapper;
if (dynamicMappingsUpdate == null) {
dynamicMappingsUpdate = mapper;
} else {
dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false);
}
public void addDynamicMapper(Mapper mapper) {
dynamicMappers.add(mapper);
}
@Override
public Mapper dynamicMappingsUpdate() {
return dynamicMappingsUpdate;
public List<Mapper> getDynamicMappers() {
return dynamicMappers;
}
}
@ -747,12 +742,12 @@ public abstract class ParseContext {
public abstract StringBuilder stringBuilder();
/**
* Add a dynamic update to the root object mapper.
* Add a new mapper dynamically created while parsing.
*/
public abstract void addDynamicMappingsUpdate(Mapper update);
public abstract void addDynamicMapper(Mapper update);
/**
* Get dynamic updates to the root object mapper.
* Get dynamic mappers created while parsing.
*/
public abstract Mapper dynamicMappingsUpdate();
public abstract List<Mapper> getDynamicMappers();
}

View File

@ -26,6 +26,9 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@ -39,9 +42,12 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.apache.lucene.index.IndexOptions.NONE;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
@ -52,6 +58,11 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
public static final String CONTENT_TYPE = "string";
private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1;
private static final Set<String> SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE = new HashSet<>(Arrays.asList(
"type",
// most common parameters, for which the upgrade is straightforward
"index", "store", "doc_values", "omit_norms", "norms", "fields", "copy_to"));
public static class Defaults {
public static final MappedFieldType FIELD_TYPE = new StringFieldType();
@ -130,13 +141,33 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
}
public static class TypeParser implements Mapper.TypeParser {
private final DeprecationLogger deprecationLogger;
public TypeParser() {
ESLogger logger = Loggers.getLogger(getClass());
this.deprecationLogger = new DeprecationLogger(logger);
}
@Override
public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
// TODO: temporarily disabled to give Kibana time to upgrade to text/keyword mappings
/*if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
// Automatically upgrade simple mappings for ease of upgrade, otherwise fail
if (SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE.containsAll(node.keySet())) {
deprecationLogger.deprecated("The [string] field is deprecated, please use [text] or [keyword] instead on [{}]",
fieldName);
final Object index = node.remove("index");
final boolean keyword = index != null && "analyzed".equals(index) == false;
// upgrade the index setting
node.put("index", "no".equals(index) == false);
if (keyword) {
return new KeywordFieldMapper.TypeParser().parse(fieldName, node, parserContext);
} else {
return new TextFieldMapper.TypeParser().parse(fieldName, node, parserContext);
}
}
throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
+ "or [keyword] field instead for field [" + fieldName + "]");
}*/
}
StringFieldMapper.Builder builder = new StringFieldMapper.Builder(fieldName);
// hack for the fact that string can't just accept true/false for
// the index property and still accepts no/not_analyzed/analyzed
@ -241,11 +272,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
int positionIncrementGap, int ignoreAbove,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
// TODO: temporarily disabled to give Kibana time to upgrade to text/keyword mappings
/*if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0)) {
if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0)) {
throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
+ "or [keyword] field instead for field [" + fieldType.name() + "]");
}*/
}
if (fieldType.tokenized() && fieldType.indexOptions() != NONE && fieldType().hasDocValues()) {
throw new MapperParsingException("Field [" + fieldType.name() + "] cannot be analyzed and have doc values");
}
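
The TypeParser change above means that on a 5.0+ index a legacy string mapping using only the parameters listed in SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE is upgraded with a deprecation warning instead of being rejected. A standalone sketch of the upgrade rule itself, using plain strings rather than Elasticsearch types; the legacy "index" values are "no", "not_analyzed" and "analyzed":

    // Sketch of the decision only; the real parser then delegates to
    // KeywordFieldMapper.TypeParser or TextFieldMapper.TypeParser with the rewritten node map.
    static String upgradedType(Object legacyIndex) {
        boolean keyword = legacyIndex != null && "analyzed".equals(legacyIndex) == false;
        return keyword ? "keyword" : "text";
    }
    static boolean upgradedIndexSetting(Object legacyIndex) {
        return "no".equals(legacyIndex) == false; // only "index": "no" disables indexing
    }
    // upgradedType("not_analyzed") -> "keyword", upgradedIndexSetting("not_analyzed") -> true
    // upgradedType(null)           -> "text",    upgradedIndexSetting(null)           -> true
    // upgradedType("no")           -> "keyword", upgradedIndexSetting("no")           -> false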

View File

@ -25,7 +25,9 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
@ -39,11 +41,14 @@ import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityService;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.isArray;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
@ -63,10 +68,18 @@ public class TypeParsers {
public static final String INDEX_OPTIONS_POSITIONS = "positions";
public static final String INDEX_OPTIONS_OFFSETS = "offsets";
private static boolean nodeBooleanValue(Object node, Mapper.TypeParser.ParserContext parserContext) {
if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeParsers.class));
private static final Set<String> BOOLEAN_STRINGS = new HashSet<>(Arrays.asList("true", "false"));
private static boolean nodeBooleanValue(String name, Object node, Mapper.TypeParser.ParserContext parserContext) {
// Hook onto ParseFieldMatcher so that parsing becomes strict when setting index.query.parse.strict
if (parserContext.parseFieldMatcher().isStrict()) {
return XContentMapValues.nodeBooleanValue(node);
} else {
// TODO: remove this leniency in 6.0
if (BOOLEAN_STRINGS.contains(node.toString()) == false) {
DEPRECATION_LOGGER.deprecated("Expected a boolean for property [{}] but got [{}]", name, node);
}
return XContentMapValues.lenientNodeBooleanValue(node);
}
}
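
The nodeBooleanValue change above ties strictness to ParseFieldMatcher: with index.query.parse.strict set, values are handed to XContentMapValues.nodeBooleanValue(...) and anything that is not a genuine boolean is rejected; otherwise the old lenient coercion is kept but any value outside BOOLEAN_STRINGS now logs a deprecation warning. A hedged behavioural example (the exact set of values the legacy lenient parser coerces is an assumption about XContentMapValues):

    Map<String, Object> node = new HashMap<>();
    node.put("coerce", "off");
    // lenient (default): parsed via the legacy coercion as before, plus a deprecation warning
    //   "Expected a boolean for property [coerce] but got [off]"
    // strict (index.query.parse.strict): delegated to XContentMapValues.nodeBooleanValue(...)
    //   and rejected because "off" is not a boolean
    node.put("coerce", "false");  // listed in BOOLEAN_STRINGS, so no warning in either mode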
@ -81,13 +94,13 @@ public class TypeParsers {
builder.precisionStep(nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("ignore_malformed")) {
builder.ignoreMalformed(nodeBooleanValue(propNode, parserContext));
builder.ignoreMalformed(nodeBooleanValue("ignore_malformed", propNode, parserContext));
iterator.remove();
} else if (propName.equals("coerce")) {
builder.coerce(nodeBooleanValue(propNode, parserContext));
builder.coerce(nodeBooleanValue("coerce", propNode, parserContext));
iterator.remove();
} else if (propName.equals("omit_norms")) {
builder.omitNorms(nodeBooleanValue(propNode, parserContext));
builder.omitNorms(nodeBooleanValue("omit_norms", propNode, parserContext));
iterator.remove();
} else if (propName.equals("similarity")) {
SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString());
@ -112,16 +125,16 @@ public class TypeParsers {
parseTermVector(name, propNode.toString(), builder);
iterator.remove();
} else if (propName.equals("store_term_vectors")) {
builder.storeTermVectors(nodeBooleanValue(propNode, parserContext));
builder.storeTermVectors(nodeBooleanValue("store_term_vectors", propNode, parserContext));
iterator.remove();
} else if (propName.equals("store_term_vector_offsets")) {
builder.storeTermVectorOffsets(nodeBooleanValue(propNode, parserContext));
builder.storeTermVectorOffsets(nodeBooleanValue("store_term_vector_offsets", propNode, parserContext));
iterator.remove();
} else if (propName.equals("store_term_vector_positions")) {
builder.storeTermVectorPositions(nodeBooleanValue(propNode, parserContext));
builder.storeTermVectorPositions(nodeBooleanValue("store_term_vector_positions", propNode, parserContext));
iterator.remove();
} else if (propName.equals("store_term_vector_payloads")) {
builder.storeTermVectorPayloads(nodeBooleanValue(propNode, parserContext));
builder.storeTermVectorPayloads(nodeBooleanValue("store_term_vector_payloads", propNode, parserContext));
iterator.remove();
} else if (propName.equals("analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
@ -199,13 +212,13 @@ public class TypeParsers {
builder.index(parseIndex(name, propNode.toString(), parserContext));
iterator.remove();
} else if (propName.equals(DOC_VALUES)) {
builder.docValues(nodeBooleanValue(propNode, parserContext));
builder.docValues(nodeBooleanValue(DOC_VALUES, propNode, parserContext));
iterator.remove();
} else if (propName.equals("boost")) {
builder.boost(nodeFloatValue(propNode));
iterator.remove();
} else if (propName.equals("omit_norms")) {
builder.omitNorms(nodeBooleanValue(propNode, parserContext));
builder.omitNorms(nodeBooleanValue("omit_norms", propNode, parserContext));
iterator.remove();
} else if (propName.equals("norms")) {
final Map<String, Object> properties = nodeMapValue(propNode, "norms");
@ -227,7 +240,7 @@ public class TypeParsers {
builder.indexOptions(nodeIndexOptionValue(propNode));
iterator.remove();
} else if (propName.equals("include_in_all")) {
builder.includeInAll(nodeBooleanValue(propNode, parserContext));
builder.includeInAll(nodeBooleanValue("include_in_all", propNode, parserContext));
iterator.remove();
} else if (propName.equals("similarity")) {
SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString());
@ -243,7 +256,7 @@ public class TypeParsers {
(indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) {
throw new MapperParsingException("copy_to in multi fields is not allowed. Found the copy_to in field [" + name + "] which is within a multi field.");
} else {
ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.");
ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [{}] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.", name);
}
} else {
parseCopyFields(propNode, builder);
@ -353,35 +366,32 @@ public class TypeParsers {
}
public static boolean parseIndex(String fieldName, String index, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException {
if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
switch (index) {
case "true":
return true;
case "false":
return false;
default:
switch (index) {
case "true":
return true;
case "false":
return false;
case "not_analyzed":
case "analyzed":
case "no":
if (parserContext.parseFieldMatcher().isStrict() == false) {
DEPRECATION_LOGGER.deprecated("Expected a boolean for property [index] but got [{}]", index);
return "no".equals(index) == false;
} else {
throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]");
}
} else {
final String normalizedIndex = Strings.toUnderscoreCase(index);
switch (normalizedIndex) {
case "true":
case "not_analyzed":
case "analyzed":
return true;
case "false":
case "no":
return false;
default:
throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
}
default:
throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]");
}
}
public static boolean parseStore(String fieldName, String store, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException {
if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
if (parserContext.parseFieldMatcher().isStrict()) {
return XContentMapValues.nodeBooleanValue(store);
} else {
if (BOOLEAN_STRINGS.contains(store) == false) {
DEPRECATION_LOGGER.deprecated("Expected a boolean for property [store] but got [{}]", store);
}
if ("no".equals(store)) {
return false;
} else if ("yes".equals(store)) {

View File

@ -65,7 +65,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
public static final String LON = "lon";
public static final String LON_SUFFIX = "." + LON;
public static final String GEOHASH = "geohash";
public static final String GEOHASH_SUFFIX = "." + GEOHASH;
public static final String IGNORE_MALFORMED = "ignore_malformed";
}

View File

@ -250,7 +250,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
GeoPoint luceneTopLeft = new GeoPoint(topLeft);
GeoPoint luceneBottomRight = new GeoPoint(bottomRight);
if (GeoValidationMethod.isCoerce(validationMethod)) {
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
// Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for
// the complete longitude range so need to set longitude to the complete longitude range
double right = luceneBottomRight.getLon();
@ -265,7 +266,6 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
}
}
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format
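
Both geo query builders in this diff now normalize points whenever the index was created on or after 2.2, not only when coerce is requested. For the bounding-box case, the special case mentioned in the comment above roughly amounts to the following sketch (reconstructed from the surrounding code, not the full method):

    double right = luceneBottomRight.getLon();
    double left = luceneTopLeft.getLon();
    // a box whose width is a positive multiple of 360 means "all longitudes"
    boolean completeLonRange = ((right - left) % 360 == 0 && right > left);
    GeoUtils.normalizePoint(luceneTopLeft, true, completeLonRange == false);     // normalize lat, skip lon wrap
    GeoUtils.normalizePoint(luceneBottomRight, true, completeLonRange == false);
    if (completeLonRange) {
        luceneTopLeft.resetLon(-180);
        luceneBottomRight.resetLon(180);
    }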

View File

@ -219,18 +219,18 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
throw new QueryShardException(shardContext, "field [" + fieldName + "] is not a geo_point field");
}
final Version indexVersionCreated = shardContext.indexVersionCreated();
QueryValidationException exception = checkLatLon(shardContext.indexVersionCreated().before(Version.V_2_0_0));
if (exception != null) {
throw new QueryShardException(shardContext, "couldn't validate latitude/ longitude values", exception);
}
if (GeoValidationMethod.isCoerce(validationMethod)) {
if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
GeoUtils.normalizePoint(center, true, true);
}
double normDistance = geoDistance.normalize(this.distance, DistanceUnit.DEFAULT);
final Version indexVersionCreated = shardContext.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) {
GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
IndexGeoPointFieldData indexFieldData = shardContext.getForField(fieldType);

View File

@ -120,9 +120,6 @@ public class GeoDistanceQueryParser implements QueryParser<GeoDistanceQueryBuild
} else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.LON_SUFFIX)) {
point.resetLon(parser.doubleValue());
fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.LON_SUFFIX.length());
} else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.GEOHASH_SUFFIX)) {
point.resetFromGeoHash(parser.text());
fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {

View File

@ -236,7 +236,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
}
GeoPoint point = new GeoPoint(this.point);
if (GeoValidationMethod.isCoerce(validationMethod)) {
if (indexCreatedBeforeV2_2 == false || GeoValidationMethod.isCoerce(validationMethod)) {
GeoUtils.normalizePoint(point, true, true);
}

View File

@ -196,15 +196,6 @@ public class GeoDistanceRangeQueryParser implements QueryParser<GeoDistanceRange
point = new GeoPoint();
}
point.resetLon(parser.doubleValue());
} else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.GEOHASH_SUFFIX)) {
String maybeFieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length());
if (fieldName == null || fieldName.equals(maybeFieldName)) {
fieldName = maybeFieldName;
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + GeoDistanceRangeQueryBuilder.NAME +
"] field name already set to [" + fieldName + "] but found [" + currentFieldName + "]");
}
point = GeoPoint.fromGeohash(parser.text());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, NAME_FIELD)) {
queryName = parser.text();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, BOOST_FIELD)) {

View File

@ -131,13 +131,13 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
}
}
if (GeoValidationMethod.isCoerce(validationMethod)) {
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
for (GeoPoint point : shell) {
GeoUtils.normalizePoint(point, true, true);
}
}
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) {
IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
return new GeoPolygonQuery(indexFieldData, shell.toArray(new GeoPoint[shellSize]));

View File

@ -31,7 +31,6 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
@ -61,8 +60,8 @@ public class NestedInnerQueryParseSupport {
protected ObjectMapper nestedObjectMapper;
private ObjectMapper parentObjectMapper;
public NestedInnerQueryParseSupport(XContentParser parser, SearchContext searchContext) {
shardContext = searchContext.getQueryShardContext();
public NestedInnerQueryParseSupport(XContentParser parser, QueryShardContext context) {
shardContext = context;
parseContext = shardContext.parseContext();
shardContext.reset(parser);

View File

@ -49,7 +49,7 @@ import java.util.Map;
* be stored as payloads to numeric doc values.
*/
public final class ElasticsearchMergePolicy extends MergePolicy {
private static ESLogger logger = Loggers.getLogger(ElasticsearchMergePolicy.class);
private final MergePolicy delegate;
@ -69,7 +69,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy {
/** Return an "upgraded" view of the reader. */
static CodecReader filter(CodecReader reader) throws IOException {
// TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid?

// the previous code never did this, so some indexes carry around trash.
return reader;
}
@ -155,7 +155,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy {
// TODO: Use IndexUpgradeMergePolicy instead. We should be comparing codecs,
// for now we just assume every minor upgrade has a new format.
logger.debug("Adding segment " + info.info.name + " to be upgraded");
logger.debug("Adding segment {} to be upgraded", info.info.name);
spec.add(new OneMerge(Collections.singletonList(info)));
}
@ -163,14 +163,14 @@ public final class ElasticsearchMergePolicy extends MergePolicy {
if (spec.merges.size() == MAX_CONCURRENT_UPGRADE_MERGES) {
// hit our max upgrades, so return the spec. we will get a cascaded call to continue.
logger.debug("Returning " + spec.merges.size() + " merges for upgrade");
logger.debug("Returning {} merges for upgrade", spec.merges.size());
return spec;
}
}
// We must have less than our max upgrade merges, so the next return will be our last in upgrading mode.
if (spec.merges.isEmpty() == false) {
logger.debug("Returning " + spec.merges.size() + " merges for end of upgrade");
logger.debug("Returning {} merges for end of upgrade", spec.merges.size());
return spec;
}

View File

@ -712,7 +712,7 @@ public class IndexShard extends AbstractIndexShardComponent {
false, true, upgrade.upgradeOnlyAncientSegments());
org.apache.lucene.util.Version version = minimumCompatibleVersion();
if (logger.isTraceEnabled()) {
logger.trace("upgraded segment {} from version {} to version {}", previousVersion, version);
logger.trace("upgraded segments for {} from version {} to version {}", shardId, previousVersion, version);
}
return version;

View File

@ -128,9 +128,8 @@ final class StoreRecovery {
assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder();
sb.append("recovery completed from ").append("shard_store").append(", took [").append(timeValueMillis(recoveryState.getTimer().time())).append("]\n");
RecoveryState.Index index = recoveryState.getIndex();
StringBuilder sb = new StringBuilder();
sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [")
.append(new ByteSizeValue(index.totalBytes())).append("], took[")
.append(TimeValue.timeValueMillis(index.time())).append("]\n");
@ -142,7 +141,7 @@ final class StoreRecovery {
.append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations())
.append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
logger.trace(sb.toString());
logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb);
} else if (logger.isDebugEnabled()) {
logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time()));
}

View File

@ -379,7 +379,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
if (isClosed.compareAndSet(false, true)) {
// only do this once!
decRef();
logger.debug("store reference count on close: " + refCounter.refCount());
logger.debug("store reference count on close: {}", refCounter.refCount());
}
}
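
The logging edits in this and the neighbouring files share two goals: replace string concatenation in log calls with parameterized messages, and make the number of {} placeholders match the arguments actually passed, both of which can be checked statically. For example, using the lines from the diffs above:

    // concatenation builds the message even when debug logging is disabled:
    logger.debug("Adding segment " + info.info.name + " to be upgraded");
    // the parameterized form defers formatting and keeps placeholders checkable:
    logger.debug("Adding segment {} to be upgraded", info.info.name);

    // placeholder/argument mismatches are also corrected, e.g. two placeholders but one argument:
    logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", lastSeenAccessTime);
    // becomes
    logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", recoveryId, lastSeenAccessTime);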

View File

@ -103,6 +103,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
@ -185,14 +186,14 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown"));
// Copy indices because we modify it asynchronously in the body of the loop
Set<String> indices = new HashSet<>(this.indices.keySet());
final Set<Index> indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet());
final CountDownLatch latch = new CountDownLatch(indices.size());
for (final String index : indices) {
for (final Index index : indices) {
indicesStopExecutor.execute(() -> {
try {
removeIndex(index, "shutdown", false);
} catch (Throwable e) {
logger.warn("failed to remove index on stop [" + index + "]", e);
logger.warn("failed to remove index on stop [{}]", e, index);
} finally {
latch.countDown();
}
@ -256,7 +257,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
}
Map<Index, List<IndexShardStats>> statsByShard = new HashMap<>();
for (IndexService indexService : indices.values()) {
for (IndexService indexService : this) {
for (IndexShard indexShard : indexService) {
try {
if (indexShard.routingEntry() == null) {
@ -290,17 +291,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
return indices.values().iterator();
}
public boolean hasIndex(String index) {
return indices.containsKey(index);
}
/**
* Returns an IndexService for the specified index if exists otherwise returns <code>null</code>.
*
*/
@Nullable
public IndexService indexService(String index) {
return indices.get(index);
public boolean hasIndex(Index index) {
return indices.containsKey(index.getUUID());
}
/**
@ -309,33 +301,21 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
*/
@Nullable
public IndexService indexService(Index index) {
return indexService(index.getName());
}
/**
* Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown.
*/
public IndexService indexServiceSafe(String index) {
IndexService indexService = indexService(index);
if (indexService == null) {
throw new IndexNotFoundException(index);
}
return indexService;
return indices.get(index.getUUID());
}
/**
* Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown.
*/
public IndexService indexServiceSafe(Index index) {
IndexService indexService = indexServiceSafe(index.getName());
if (indexService.indexUUID().equals(index.getUUID()) == false) {
IndexService indexService = indices.get(index.getUUID());
if (indexService == null) {
throw new IndexNotFoundException(index);
}
assert indexService.indexUUID().equals(index.getUUID()) : "uuid mismatch local: " + indexService.indexUUID() + " incoming: " + index.getUUID();
return indexService;
}
/**
* Creates a new {@link IndexService} for the given metadata.
* @param indexMetaData the index metadata to create the index for
@ -346,10 +326,13 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
if (!lifecycle.started()) {
throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed");
}
if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) {
throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]");
}
final Index index = indexMetaData.getIndex();
final Predicate<String> indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state());
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting);
if (indices.containsKey(index.getName())) {
if (hasIndex(index)) {
throw new IndexAlreadyExistsException(index);
}
logger.debug("creating Index [{}], shards [{}]/[{}{}]",
@ -378,7 +361,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
try {
assert indexService.getIndexEventListener() == listener;
listener.afterIndexCreated(indexService);
indices = newMapBuilder(indices).put(index.getName(), indexService).immutableMap();
indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap();
success = true;
return indexService;
} finally {
@ -395,22 +378,24 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* @param index the index to remove
* @param reason the high level reason causing this removal
*/
public void removeIndex(String index, String reason) {
public void removeIndex(Index index, String reason) {
removeIndex(index, reason, false);
}
private void removeIndex(String index, String reason, boolean delete) {
private void removeIndex(Index index, String reason, boolean delete) {
final String indexName = index.getName();
try {
final IndexService indexService;
final IndexEventListener listener;
synchronized (this) {
if (indices.containsKey(index) == false) {
if (hasIndex(index) == false) {
return;
}
logger.debug("[{}] closing ... (reason [{}])", index, reason);
logger.debug("[{}] closing ... (reason [{}])", indexName, reason);
Map<String, IndexService> newIndices = new HashMap<>(indices);
indexService = newIndices.remove(index);
indexService = newIndices.remove(index.getUUID());
assert indexService != null : "IndexService is null for index: " + index;
indices = unmodifiableMap(newIndices);
listener = indexService.getIndexEventListener();
}
@ -419,9 +404,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
if (delete) {
listener.beforeIndexDeleted(indexService);
}
logger.debug("[{}] closing index service (reason [{}])", index, reason);
logger.debug("{} closing index service (reason [{}])", index, reason);
indexService.close(reason, delete);
logger.debug("[{}] closed... (reason [{}])", index, reason);
logger.debug("{} closed... (reason [{}])", index, reason);
listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings());
if (delete) {
final IndexSettings indexSettings = indexService.getIndexSettings();
@ -474,12 +459,12 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* Deletes the given index. Persistent parts of the index
* like the shards files, state and transaction logs are removed once all resources are released.
*
* Equivalent to {@link #removeIndex(String, String)} but fires
* Equivalent to {@link #removeIndex(Index, String)} but fires
* different lifecycle events to ensure pending resources of this index are immediately removed.
* @param index the index to delete
* @param reason the high level reason causing this delete
*/
public void deleteIndex(String index, String reason) throws IOException {
public void deleteIndex(Index index, String reason) throws IOException {
removeIndex(index, reason, true);
}
@ -505,16 +490,17 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException {
if (nodeEnv.hasNodeFile()) {
synchronized (this) {
String indexName = metaData.getIndex().getName();
if (indices.containsKey(indexName)) {
String localUUid = indices.get(indexName).indexUUID();
throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]");
Index index = metaData.getIndex();
if (hasIndex(index)) {
String localUUid = indexService(index).indexUUID();
throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]");
}
if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) {
if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().localNode().masterNode() == true)) {
// we do not delete the store if it is a master eligible node and the index is still in the cluster state
// because we want to keep the meta data for indices around even if no shards are left here
final IndexMetaData index = clusterState.metaData().index(indexName);
throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
final IndexMetaData idxMeta = clusterState.metaData().index(index.getName());
throw new IllegalStateException("Can't delete closed index store for [" + index.getName() + "] - it's still part of the cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
}
}
final IndexSettings indexSettings = buildIndexSettings(metaData);
@ -607,7 +593,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* @return true if the index can be deleted on this node
*/
public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings, boolean closed) {
final IndexService indexService = this.indices.get(index.getName());
final IndexService indexService = indexService(index);
// Closed indices may be deleted, even if they are on a shared
// filesystem. Since it is closed we aren't deleting it for relocation
if (indexSettings.isOnSharedFilesystem() == false || closed) {
@ -634,7 +620,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
*/
public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
assert shardId.getIndex().equals(indexSettings.getIndex());
final IndexService indexService = this.indices.get(shardId.getIndexName());
final IndexService indexService = indexService(shardId.getIndex());
if (indexSettings.isOnSharedFilesystem() == false) {
if (indexService != null && nodeEnv.hasNodeFile()) {
return indexService.hasShard(shardId.id()) == false;
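
The IndicesService rework above keys the internal map by index UUID and moves every lookup from plain index names to org.elasticsearch.index.Index, so a deleted-and-recreated index with the same name cannot be mistaken for its predecessor. The caller-side pattern, as used by the shard store and recovery code in this diff:

    Index index = shardId.getIndex();
    IndexService indexService = indicesService.indexService(index);   // null when this node hosts no such index (by UUID)
    if (indexService != null) {
        IndexShard shard = indexService.getShardOrNull(shardId.id());
        // ... operate on the shard if it exists locally
    }
    // when the index must exist:
    IndexService required = indicesService.indexServiceSafe(index);   // throws IndexNotFoundException otherwise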

View File

@ -46,6 +46,7 @@ import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexShardAlreadyExistsException;
@ -157,13 +158,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// TODO: this feels a bit hacky here, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks?
if (event.state().blocks().disableStatePersistence()) {
for (IndexService indexService : indicesService) {
String index = indexService.index().getName();
Index index = indexService.index();
for (Integer shardId : indexService.shardIds()) {
logger.debug("[{}][{}] removing shard (disabled block persistence)", index, shardId);
logger.debug("{}[{}] removing shard (disabled block persistence)", index, shardId);
try {
indexService.removeShard(shardId, "removing shard (disabled block persistence)");
} catch (Throwable e) {
logger.warn("[{}] failed to remove shard (disabled block persistence)", e, index);
logger.warn("{} failed to remove shard (disabled block persistence)", e, index);
}
}
removeIndex(index, "cleaning index (disabled block persistence)");
@ -201,10 +202,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
}
for (IndexService indexService : indicesService) {
String index = indexService.index().getName();
Index index = indexService.index();
if (indexService.shardIds().isEmpty()) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index (no shards allocated)", index);
logger.debug("{} cleaning index (no shards allocated)", index);
}
// clean the index
removeIndex(index, "removing index (no shards allocated)");
@ -222,12 +223,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (indexMetaData != null) {
if (!indexMetaData.isSameUUID(indexService.indexUUID())) {
logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.getIndex());
deleteIndex(indexMetaData.getIndex().getName(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
deleteIndex(indexMetaData.getIndex(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
}
}
}
for (String index : event.indicesDeleted()) {
for (Index index : event.indicesDeleted()) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index, no longer part of the metadata", index);
}
@ -243,7 +244,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
}
try {
nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index, indexSettings, localNodeId);
nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index.getName(), indexSettings, localNodeId);
} catch (Throwable e) {
logger.debug("failed to send to master index {} deleted event", e, index);
}
@ -298,7 +299,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
return;
}
for (ShardRouting shard : routingNode) {
if (!indicesService.hasIndex(shard.getIndexName())) {
if (!indicesService.hasIndex(shard.index())) {
final IndexMetaData indexMetaData = event.state().metaData().index(shard.index());
if (logger.isDebugEnabled()) {
logger.debug("[{}] creating index", indexMetaData.getIndex());
@ -317,7 +318,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
return;
}
for (IndexMetaData indexMetaData : event.state().metaData()) {
if (!indicesService.hasIndex(indexMetaData.getIndex().getName())) {
if (!indicesService.hasIndex(indexMetaData.getIndex())) {
// we only create / update here
continue;
}
@ -325,7 +326,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (!event.indexMetaDataChanged(indexMetaData)) {
continue;
}
String index = indexMetaData.getIndex().getName();
Index index = indexMetaData.getIndex();
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
// already deleted on us, ignore it
@ -339,12 +340,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private void applyMappings(ClusterChangedEvent event) {
// go over and update mappings
for (IndexMetaData indexMetaData : event.state().metaData()) {
if (!indicesService.hasIndex(indexMetaData.getIndex().getName())) {
Index index = indexMetaData.getIndex();
if (!indicesService.hasIndex(index)) {
// we only create / update here
continue;
}
boolean requireRefresh = false;
String index = indexMetaData.getIndex().getName();
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
// got deleted on us, ignore (closing the node)
@ -357,11 +358,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
MappingMetaData mappingMd = cursor.value;
String mappingType = mappingMd.type();
CompressedXContent mappingSource = mappingMd.source();
requireRefresh |= processMapping(index, mapperService, mappingType, mappingSource);
requireRefresh |= processMapping(index.getName(), mapperService, mappingType, mappingSource);
}
if (requireRefresh && sendRefreshMapping) {
nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.getIndexUUID(),
new NodeMappingRefreshAction.NodeMappingRefreshRequest(index.getName(), indexMetaData.getIndexUUID(),
event.state().nodes().localNodeId())
);
}
@ -727,7 +728,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
}
private void removeIndex(String index, String reason) {
private void removeIndex(Index index, String reason) {
try {
indicesService.removeIndex(index, reason);
} catch (Throwable e) {
@ -735,7 +736,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
}
private void deleteIndex(String index, String reason) {
private void deleteIndex(Index index, String reason) {
try {
indicesService.deleteIndex(index, reason);
} catch (Throwable e) {
@ -774,7 +775,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private class FailedShardHandler implements Callback<IndexShard.ShardFailure> {
@Override
public void handle(final IndexShard.ShardFailure shardFailure) {
final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex().getName());
final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex());
final ShardRouting shardRouting = shardFailure.routing;
threadPool.generic().execute(() -> {
synchronized (mutex) {

View File

@ -309,7 +309,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
}
final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId());
if (expectedCommitId == null) {
logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue;

View File

@ -238,7 +238,7 @@ public class RecoveriesCollection {
return;
}
lastSeenAccessTime = accessTime;
logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", lastSeenAccessTime);
logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", recoveryId, lastSeenAccessTime);
threadPool.schedule(checkInterval, ThreadPool.Names.GENERIC, this);
}
}

View File

@ -83,7 +83,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
}
private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException {
final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex().getName());
final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
final IndexShard shard = indexService.getShard(request.shardId().id());
// starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise

View File

@ -137,7 +137,7 @@ public class RecoverySourceHandler {
}
}
logger.trace("snapshot translog for recovery. current size is [{}]", translogView.totalOperations());
logger.trace("{} snapshot translog for recovery. current size is [{}]", shard.shardId(), translogView.totalOperations());
try {
phase2(translogView.snapshot());
} catch (Throwable e) {
@ -289,7 +289,7 @@ public class RecoverySourceHandler {
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " +
"checksums are ok", null);
exception.addSuppressed(targetException);
logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK",
logger.warn("{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
corruptIndexException, shard.shardId(), request.targetNode());
throw exception;
} else {

View File

@ -218,7 +218,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
"operations")
.append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
.append("\n");
logger.trace(sb.toString());
logger.trace("{}", sb);
} else {
logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime);
}

View File

@ -348,7 +348,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
return null;
}
ShardId shardId = request.shardId;
IndexService indexService = indicesService.indexService(shardId.getIndexName());
IndexService indexService = indicesService.indexService(shardId.getIndex());
if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) {
return indexService.getShardOrNull(shardId.id());
}

View File

@ -126,7 +126,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
@Override
protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) {
if (request.unallocated) {
IndexService indexService = indicesService.indexService(request.shardId.getIndexName());
IndexService indexService = indicesService.indexService(request.shardId.getIndex());
if (indexService == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
@ -150,7 +150,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
long startTimeNS = System.nanoTime();
boolean exists = false;
try {
IndexService indexService = indicesService.indexService(shardId.getIndexName());
IndexService indexService = indicesService.indexService(shardId.getIndex());
if (indexService != null) {
IndexShard indexShard = indexService.getShardOrNull(shardId.id());
if (indexShard != null) {

View File

@ -287,7 +287,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
logger.error("bulk deletion failures for [{}]/[{}] items", failedItems, bulkResponse.getItems().length);
}
} else {
logger.trace("bulk deletion took " + bulkResponse.getTookInMillis() + "ms");
logger.trace("bulk deletion took {}ms", bulkResponse.getTookInMillis());
}
}

View File

@ -0,0 +1,171 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
public class IngestStats implements Writeable<IngestStats>, ToXContent {
public final static IngestStats PROTO = new IngestStats(null, null);
private final Stats totalStats;
private final Map<String, Stats> statsPerPipeline;
public IngestStats(Stats totalStats, Map<String, Stats> statsPerPipeline) {
this.totalStats = totalStats;
this.statsPerPipeline = statsPerPipeline;
}
/**
* @return The accumulated stats for all pipelines
*/
public Stats getTotalStats() {
return totalStats;
}
/**
* @return The stats on a per pipeline basis
*/
public Map<String, Stats> getStatsPerPipeline() {
return statsPerPipeline;
}
@Override
public IngestStats readFrom(StreamInput in) throws IOException {
// read in the same order writeTo writes: total stats, then the per-pipeline entries
Stats totalStats = Stats.PROTO.readFrom(in);
int size = in.readVInt();
Map<String, Stats> statsPerPipeline = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String pipelineId = in.readString();
statsPerPipeline.put(pipelineId, Stats.PROTO.readFrom(in));
}
return new IngestStats(totalStats, statsPerPipeline);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
totalStats.writeTo(out);
out.writeVInt(statsPerPipeline.size());
for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("ingest");
builder.startObject("total");
totalStats.toXContent(builder, params);
builder.endObject();
builder.startObject("pipelines");
for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) {
builder.startObject(entry.getKey());
entry.getValue().toXContent(builder, params);
builder.endObject();
}
builder.endObject();
builder.endObject();
return builder;
}
public static class Stats implements Writeable<Stats>, ToXContent {
private final static Stats PROTO = new Stats(0, 0, 0, 0);
private final long ingestCount;
private final long ingestTimeInMillis;
private final long ingestCurrent;
private final long ingestFailedCount;
public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) {
this.ingestCount = ingestCount;
this.ingestTimeInMillis = ingestTimeInMillis;
this.ingestCurrent = ingestCurrent;
this.ingestFailedCount = ingestFailedCount;
}
/**
* @return The total number of executed ingest preprocessing operations.
*/
public long getIngestCount() {
return ingestCount;
}
/**
* @return The total time spent on ingest preprocessing, in milliseconds.
*/
public long getIngestTimeInMillis() {
return ingestTimeInMillis;
}
/**
* @return The total number of ingest preprocessing operations currently executing.
*/
public long getIngestCurrent() {
return ingestCurrent;
}
/**
* @return The total number of ingest preprocessing operations that have failed.
*/
public long getIngestFailedCount() {
return ingestFailedCount;
}
@Override
public Stats readFrom(StreamInput in) throws IOException {
long ingestCount = in.readVLong();
long ingestTimeInMillis = in.readVLong();
long ingestCurrent = in.readVLong();
long ingestFailedCount = in.readVLong();
return new Stats(ingestCount, ingestTimeInMillis, ingestCurrent, ingestFailedCount);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(ingestCount);
out.writeVLong(ingestTimeInMillis);
out.writeVLong(ingestCurrent);
out.writeVLong(ingestFailedCount);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("count", ingestCount);
builder.timeValueField("time_in_millis", "time", ingestTimeInMillis, TimeUnit.MILLISECONDS);
builder.field("current", ingestCurrent);
builder.field("failed", ingestFailedCount);
return builder;
}
}
}
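
As a quick sanity check on the stream format above, a hedged round-trip sketch (illustrative only, not part of this commit; the exact BytesStreamOutput/StreamInput helper calls may differ between Elasticsearch versions, and it assumes readFrom reads in the same order writeTo writes):

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.ingest.IngestStats;

import java.util.Collections;

public class IngestStatsRoundTripExample {
    public static void main(String[] args) throws Exception {
        IngestStats.Stats total = new IngestStats.Stats(10, 250, 1, 2);
        IngestStats stats = new IngestStats(total,
                Collections.singletonMap("my-pipeline", new IngestStats.Stats(4, 100, 0, 1)));

        // serialize: total stats first, then the per-pipeline entries
        BytesStreamOutput out = new BytesStreamOutput();
        stats.writeTo(out);

        // deserialize through the prototype and check a couple of fields
        IngestStats read = IngestStats.PROTO.readFrom(StreamInput.wrap(out.bytes().toBytes()));
        assert read.getTotalStats().getIngestCount() == 10;
        assert read.getStatsPerPipeline().containsKey("my-pipeline");
    }
}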

View File

@ -19,23 +19,36 @@
package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
public class PipelineExecutionService {
public class PipelineExecutionService implements ClusterStateListener {
private final PipelineStore store;
private final ThreadPool threadPool;
private final StatsHolder totalStats = new StatsHolder();
private volatile Map<String, StatsHolder> statsHolderPerPipeline = Collections.emptyMap();
public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) {
this.store = store;
this.threadPool = threadPool;
@ -89,29 +102,85 @@ public class PipelineExecutionService {
});
}
private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception {
String index = indexRequest.index();
String type = indexRequest.type();
String id = indexRequest.id();
String routing = indexRequest.routing();
String parent = indexRequest.parent();
String timestamp = indexRequest.timestamp();
String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString();
Map<String, Object> sourceAsMap = indexRequest.sourceAsMap();
IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap);
pipeline.execute(ingestDocument);
public IngestStats stats() {
Map<String, StatsHolder> statsHolderPerPipeline = this.statsHolderPerPipeline;
Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();
//it's fine to set all metadata fields all the time, as ingest document holds their starting values
//before ingestion, which might also get modified during ingestion.
indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX));
indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE));
indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID));
indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING));
indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT));
indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP));
indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL));
indexRequest.source(ingestDocument.getSourceAndMetadata());
Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size());
for (Map.Entry<String, StatsHolder> entry : statsHolderPerPipeline.entrySet()) {
statsPerPipeline.put(entry.getKey(), entry.getValue().createStats());
}
return new IngestStats(totalStats.createStats(), statsPerPipeline);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE);
if (ingestMetadata != null) {
updatePipelineStats(ingestMetadata);
}
}
void updatePipelineStats(IngestMetadata ingestMetadata) {
boolean changed = false;
Map<String, StatsHolder> newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline);
for (String pipeline : newStatsPerPipeline.keySet()) {
if (ingestMetadata.getPipelines().containsKey(pipeline) == false) {
newStatsPerPipeline.remove(pipeline);
changed = true;
}
}
for (String pipeline : ingestMetadata.getPipelines().keySet()) {
if (newStatsPerPipeline.containsKey(pipeline) == false) {
newStatsPerPipeline.put(pipeline, new StatsHolder());
changed = true;
}
}
if (changed) {
statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline);
}
}
private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception {
long startTimeInNanos = System.nanoTime();
// the pipeline-specific stats holder may not exist and that is fine
// (e.g. the pipeline may have been removed while we're ingesting a document)
Optional<StatsHolder> pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId()));
try {
totalStats.preIngest();
pipelineStats.ifPresent(StatsHolder::preIngest);
String index = indexRequest.index();
String type = indexRequest.type();
String id = indexRequest.id();
String routing = indexRequest.routing();
String parent = indexRequest.parent();
String timestamp = indexRequest.timestamp();
String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString();
Map<String, Object> sourceAsMap = indexRequest.sourceAsMap();
IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap);
pipeline.execute(ingestDocument);
Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();
//it's fine to set all metadata fields all the time, as ingest document holds their starting values
//before ingestion, which might also get modified during ingestion.
indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX));
indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE));
indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID));
indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING));
indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT));
indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP));
indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL));
indexRequest.source(ingestDocument.getSourceAndMetadata());
} catch (Exception e) {
totalStats.ingestFailed();
pipelineStats.ifPresent(StatsHolder::ingestFailed);
throw e;
} finally {
long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos);
totalStats.postIngest(ingestTimeInMillis);
pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis));
}
}
private Pipeline getPipeline(String pipelineId) {
@ -121,4 +190,30 @@ public class PipelineExecutionService {
}
return pipeline;
}
static class StatsHolder {
private final MeanMetric ingestMetric = new MeanMetric();
private final CounterMetric ingestCurrent = new CounterMetric();
private final CounterMetric ingestFailed = new CounterMetric();
void preIngest() {
ingestCurrent.inc();
}
void postIngest(long ingestTimeInMillis) {
ingestCurrent.dec();
ingestMetric.inc(ingestTimeInMillis);
}
void ingestFailed() {
ingestFailed.inc();
}
IngestStats.Stats createStats() {
return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count());
}
}
}
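
To make the metric bookkeeping concrete, a small sketch of how the StatsHolder counters feed an IngestStats.Stats snapshot (illustrative only, not part of this commit; StatsHolder is package-private, so this assumes same-package access, e.g. from a test under org.elasticsearch.ingest):

package org.elasticsearch.ingest;

public class StatsHolderExample {
    public static void main(String[] args) {
        PipelineExecutionService.StatsHolder holder = new PipelineExecutionService.StatsHolder();

        holder.preIngest();     // first document enters the pipeline: current -> 1
        holder.postIngest(12);  // finished after 12ms: current -> 0, count -> 1, total time -> 12

        holder.preIngest();     // second document fails but is still timed and counted
        holder.ingestFailed();
        holder.postIngest(3);

        IngestStats.Stats stats = holder.createStats();
        assert stats.getIngestCount() == 2;         // both documents
        assert stats.getIngestTimeInMillis() == 15; // 12 + 3
        assert stats.getIngestCurrent() == 0;       // nothing in flight
        assert stats.getIngestFailedCount() == 1;   // the failed one
    }
}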

Some files were not shown because too many files have changed in this diff.