Merge branch 'master' into feature-suggest-refactoring

Christoph Büscher 2016-02-03 09:51:23 +01:00
commit b7e3323ded
225 changed files with 4031 additions and 6750 deletions

View File

@@ -16,7 +16,6 @@
 -/plugins/discovery-azure/target
 -/plugins/discovery-ec2/target
 -/plugins/discovery-gce/target
--/plugins/discovery-multicast/target
 -/plugins/jvm-example/target
 -/plugins/lang-expression/target
 -/plugins/lang-groovy/target

View File

@ -94,6 +94,9 @@ class PrecommitTasks {
project.checkstyle { project.checkstyle {
config = project.resources.text.fromFile( config = project.resources.text.fromFile(
PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8') PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
configProperties = [
suppressions: PrecommitTasks.getResource('/checkstyle_suppressions.xml')
]
} }
for (String taskName : ['checkstyleMain', 'checkstyleTest']) { for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
Task task = project.tasks.findByName(taskName) Task task = project.tasks.findByName(taskName)

View File

@ -6,6 +6,10 @@
<module name="Checker"> <module name="Checker">
<property name="charset" value="UTF-8" /> <property name="charset" value="UTF-8" />
<module name="SuppressionFilter">
<property name="file" value="${suppressions}" />
</module>
<module name="TreeWalker"> <module name="TreeWalker">
<!-- ~3500 violations <!-- ~3500 violations
<module name="LineLength"> <module name="LineLength">

View File

@@ -0,0 +1,10 @@
+<?xml version="1.0"?>
+<!DOCTYPE suppressions PUBLIC
+  "-//Puppy Crawl//DTD Suppressions 1.1//EN"
+  "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
+<suppressions>
+  <!-- These files are generated by ANTLR so its silly to hold them to our rules. -->
+  <suppress files="org/elasticsearch/painless/PainlessLexer\.java" checks="." />
+  <suppress files="org/elasticsearch/painless/PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
+</suppressions>

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch;

+import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -613,7 +614,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
         TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class, org.elasticsearch.indices.TypeMissingException::new, 137),
         FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
-        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141);
+        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141),
+        NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class, ShardStateAction.NoLongerPrimaryShardException::new, 142);

         final Class<? extends ElasticsearchException> exceptionClass;
         final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;

View File

@@ -254,7 +254,11 @@ public class Version {
     public static final int V_1_7_3_ID = 1070399;
     public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
     public static final int V_1_7_4_ID = 1070499;
-    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final int V_1_7_5_ID = 1070599;
+    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final int V_1_7_6_ID = 1070699;
+    public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
     public static final int V_2_0_0_beta1_ID = 2000001;
     public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -275,9 +279,13 @@ public class Version {
     public static final int V_2_1_1_ID = 2010199;
     public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_1_2_ID = 2010299;
-    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final int V_2_1_3_ID = 2010399;
+    public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_2_0_ID = 2020099;
-    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
+    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_0);
+    public static final int V_2_2_1_ID = 2020199;
+    public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
     public static final int V_2_3_0_ID = 2030099;
     public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
     public static final int V_3_0_0_ID = 3000099;
@@ -299,8 +307,12 @@ public class Version {
             return V_3_0_0;
         case V_2_3_0_ID:
             return V_2_3_0;
+        case V_2_2_1_ID:
+            return V_2_2_1;
         case V_2_2_0_ID:
             return V_2_2_0;
+        case V_2_1_3_ID:
+            return V_2_1_3;
         case V_2_1_2_ID:
             return V_2_1_2;
         case V_2_1_1_ID:
@@ -321,6 +333,10 @@ public class Version {
             return V_2_0_0_beta2;
         case V_2_0_0_beta1_ID:
             return V_2_0_0_beta1;
+        case V_1_7_6_ID:
+            return V_1_7_6;
+        case V_1_7_5_ID:
+            return V_1_7_5;
         case V_1_7_4_ID:
             return V_1_7_4;
         case V_1_7_3_ID:
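
As an aside for readers tracing the constants above: each numeric ID packs major/minor/revision/build into fixed decimal slots, so 1070499 is 1.7.4 with build 99 and 2000001 is the 2.0.0-beta1 slot. A minimal, self-contained decoder sketch (a hypothetical helper, not part of this commit):

// Hypothetical decoder for the version-ID scheme used by the constants above;
// assumes the major/minor/revision/build layout (e.g. 1070499 -> 1.7.4, build 99).
public final class VersionIdDecoder {
    public static String decode(int id) {
        int major = id / 1000000;         // 1070499 / 1000000 == 1
        int minor = (id / 10000) % 100;   // 107 % 100 == 7
        int revision = (id / 100) % 100;  // 10704 % 100 == 4
        int build = id % 100;             // 99 conventionally marks a release build
        return major + "." + minor + "." + revision + (build == 99 ? "" : "-build" + build);
    }

    public static void main(String[] args) {
        System.out.println(decode(1070499)); // 1.7.4
        System.out.println(decode(2000001)); // 2.0.0-build1 (the beta1 slot)
    }
}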

View File

@@ -58,7 +58,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
     }

     @Override
-    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
+    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) {
         IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.flush(shardRequest.getRequest());
         logger.trace("{} flush request executed on primary", indexShard.shardId());

View File

@@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
     }

     @Override
-    protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
+    protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) {
         IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on primary", indexShard.shardId());

View File

@@ -140,7 +140,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
     }

     @Override
-    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {
+    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {
         // validate, if routing is required, that we got routing
         IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
@@ -200,7 +200,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
      * Execute the given {@link IndexRequest} on a primary shard, throwing a
      * {@link RetryOnPrimaryException} if the operation needs to be re-tried.
      */
-    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Throwable {
+    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception {
         Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         final ShardId shardId = indexShard.shardId();
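
The javadoc above describes a retry contract: if the document still produces a dynamic mapping update after one round-trip to the master, the operation is retried rather than applied with incomplete mappings. A self-contained toy model of that control flow (hypothetical names, java.io-free, not the method body from this diff):

// Models the retry-on-primary contract: push pending mappings to the master,
// re-check, and force a retry if they are still not visible locally.
final class RetryOnPrimaryDemo {
    static class RetryOnPrimaryException extends RuntimeException {
        RetryOnPrimaryException(String msg) { super(msg); }
    }

    // stand-in for parsing the document and collecting any still-unmapped fields
    static String dynamicMappingsUpdate(boolean stillPending) {
        return stillPending ? "new-field-mapping" : null;
    }

    static void executeOnPrimary(boolean masterLagsBehind) {
        if (dynamicMappingsUpdate(true) != null) {
            // push the update to the master synchronously (simulated) ...
            // ... then re-parse; if fields are still unmapped, force a retry
            if (dynamicMappingsUpdate(masterLagsBehind) != null) {
                throw new RetryOnPrimaryException("dynamic mappings not yet available");
            }
        }
        System.out.println("indexed with up-to-date mappings");
    }

    public static void main(String[] args) {
        executeOnPrimary(false); // happy path
        try {
            executeOnPrimary(true);
        } catch (RetryOnPrimaryException e) {
            System.out.println("would retry: " + e.getMessage());
        }
    }
}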

View File

@@ -22,31 +22,24 @@ package org.elasticsearch.action.ingest;

 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
-import org.elasticsearch.ingest.core.PipelineFactoryError;
-import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;

-public class SimulatePipelineResponse extends ActionResponse implements StatusToXContent {
+public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
     private String pipelineId;
     private boolean verbose;
     private List<SimulateDocumentResult> results;
-    private PipelineFactoryError error;

     public SimulatePipelineResponse() {
     }

-    public SimulatePipelineResponse(PipelineFactoryError error) {
-        this.error = error;
-    }
-
     public SimulatePipelineResponse(String pipelineId, boolean verbose, List<SimulateDocumentResult> responses) {
         this.pipelineId = pipelineId;
         this.verbose = verbose;
@@ -65,25 +58,9 @@ public class SimulatePipelineResponse extends ActionResponse implements StatusTo
         return verbose;
     }

-    public boolean isError() {
-        return error != null;
-    }
-
-    @Override
-    public RestStatus status() {
-        if (isError()) {
-            return RestStatus.BAD_REQUEST;
-        }
-        return RestStatus.OK;
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeBoolean(isError());
-        if (isError()) {
-            error.writeTo(out);
-        } else {
         out.writeString(pipelineId);
         out.writeBoolean(verbose);
         out.writeVInt(results.size());
@@ -91,16 +68,10 @@ public class SimulatePipelineResponse extends ActionResponse implements StatusTo
             response.writeTo(out);
         }
     }
-    }

     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        boolean isError = in.readBoolean();
-        if (isError) {
-            error = new PipelineFactoryError();
-            error.readFrom(in);
-        } else {
         this.pipelineId = in.readString();
         boolean verbose = in.readBoolean();
         int responsesLength = in.readVInt();
@@ -115,19 +86,14 @@ public class SimulatePipelineResponse extends ActionResponse implements StatusTo
             results.add(simulateDocumentResult);
         }
     }
-    }

     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        if (isError()) {
-            error.toXContent(builder, params);
-        } else {
         builder.startArray(Fields.DOCUMENTS);
         for (SimulateDocumentResult response : results) {
             response.toXContent(builder, params);
         }
         builder.endArray();
-        }
         return builder;
     }
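
With the isError boolean gone from the wire format, writeTo and readFrom must stay exact mirrors: every field written on one side is read in the same order on the other. A minimal, runnable illustration of that pattern, using plain java.io streams as stand-ins for StreamOutput/StreamInput (the names and fields here are illustrative only):

import java.io.*;
import java.util.*;

// Demonstrates the write/read symmetry the refactored response relies on.
final class WireSymmetryDemo {
    static byte[] write(String pipelineId, boolean verbose, List<String> results) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF(pipelineId);
            out.writeBoolean(verbose);
            out.writeInt(results.size());
            for (String r : results) {
                out.writeUTF(r);
            }
        }
        return bytes.toByteArray();
    }

    static void read(byte[] wire) throws IOException {
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(wire))) {
            String pipelineId = in.readUTF();   // same order as written
            boolean verbose = in.readBoolean();
            int n = in.readInt();
            List<String> results = new ArrayList<>(n);
            for (int i = 0; i < n; i++) {
                results.add(in.readUTF());
            }
            System.out.println(pipelineId + " verbose=" + verbose + " results=" + results);
        }
    }

    public static void main(String[] args) throws IOException {
        read(write("my-pipeline", true, Arrays.asList("doc1", "doc2")));
    }
}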

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.action.ingest;

+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -27,8 +28,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.ingest.PipelineStore;
-import org.elasticsearch.ingest.core.PipelineFactoryError;
-import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
 import org.elasticsearch.node.service.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -58,9 +57,6 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<Simu
         } else {
             simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore);
         }
-        } catch (ConfigurationPropertyException e) {
-            listener.onResponse(new SimulatePipelineResponse(new PipelineFactoryError(e)));
-            return;
         } catch (Exception e) {
             listener.onFailure(e);
             return;

View File

@@ -22,12 +22,10 @@ package org.elasticsearch.action.ingest;

 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.ingest.core.PipelineFactoryError;

 import java.io.IOException;

 public class WritePipelineResponse extends AcknowledgedResponse {
-    private PipelineFactoryError error;

     WritePipelineResponse() {
@@ -35,36 +33,17 @@ public class WritePipelineResponse extends AcknowledgedResponse {
     public WritePipelineResponse(boolean acknowledged) {
         super(acknowledged);
-        if (!isAcknowledged()) {
-            error = new PipelineFactoryError("pipeline write is not acknowledged");
-        }
-    }
-
-    public WritePipelineResponse(PipelineFactoryError error) {
-        super(false);
-        this.error = error;
-    }
-
-    public PipelineFactoryError getError() {
-        return error;
     }

     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         readAcknowledged(in);
-        if (!isAcknowledged()) {
-            error = new PipelineFactoryError();
-            error.readFrom(in);
-        }
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         writeAcknowledged(out);
-        if (!isAcknowledged()) {
-            error.writeTo(out);
-        }
     }
 }

View File

@@ -1,41 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.ingest;
-
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.rest.RestChannel;
-import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
-
-import java.io.IOException;
-
-public class WritePipelineResponseRestListener extends AcknowledgedRestListener<WritePipelineResponse> {
-
-    public WritePipelineResponseRestListener(RestChannel channel) {
-        super(channel);
-    }
-
-    @Override
-    protected void addCustomFields(XContentBuilder builder, WritePipelineResponse response) throws IOException {
-        if (!response.isAcknowledged()) {
-            response.getError().toXContent(builder, null);
-        }
-    }
-}

View File

@@ -55,6 +55,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ

     private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;

+    private long routedBasedOnClusterVersion = 0;
+
     public ReplicationRequest() {
     }

@@ -141,6 +143,20 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
         return (Request) this;
     }

+    /**
+     * Sets the minimum version of the cluster state that is required on the next node before we redirect to another primary.
+     * Used to prevent redirect loops, see also {@link TransportReplicationAction.ReroutePhase#doRun()}
+     */
+    @SuppressWarnings("unchecked")
+    Request routedBasedOnClusterVersion(long routedBasedOnClusterVersion) {
+        this.routedBasedOnClusterVersion = routedBasedOnClusterVersion;
+        return (Request) this;
+    }
+
+    long routedBasedOnClusterVersion() {
+        return routedBasedOnClusterVersion;
+    }
+
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = null;
@@ -161,6 +177,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
         consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
         timeout = TimeValue.readTimeValue(in);
         index = in.readString();
+        routedBasedOnClusterVersion = in.readVLong();
     }

     @Override
@@ -175,6 +192,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
         out.writeByte(consistencyLevel.id());
         timeout.writeTo(out);
         out.writeString(index);
+        out.writeVLong(routedBasedOnClusterVersion);
     }

     /**
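
The new field carries the cluster state version the sender routed on, and the receiving node compares it against its own state before forwarding again (the actual check lands in ReroutePhase in the next file). A self-contained sketch of that guard, with hypothetical names:

// Redirect-loop guard: only forward a re-routed request if the local cluster
// state is at least as new as the state the sender routed on; otherwise retry.
final class RerouteGuardDemo {
    static String handle(long localClusterStateVersion, long routedBasedOnClusterVersion) {
        if (localClusterStateVersion < routedBasedOnClusterVersion) {
            return "retry: local state " + localClusterStateVersion
                + " is older than sender's " + routedBasedOnClusterVersion;
        }
        // record the version we routed on before forwarding a second hop
        return "forward with routedBasedOnClusterVersion=" + localClusterStateVersion;
    }

    public static void main(String[] args) {
        System.out.println(handle(41, 42)); // stale: wait for a newer state
        System.out.println(handle(43, 42)); // up to date: safe to forward
    }
}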

View File

@@ -56,6 +56,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardState;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndicesService;
@@ -156,10 +157,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     /**
      * Primary operation on node with primary copy, the provided metadata should be used for request validation if needed
+     *
      * @return A tuple containing not null values, as first value the result of the primary operation and as second value
      *         the request to be executed on the replica shards.
      */
-    protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable;
+    protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception;

     /**
      * Replica operation on nodes with replica copies
@@ -299,7 +301,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             setShard(shardId);
         }

-        public RetryOnReplicaException(StreamInput in) throws IOException{
+        public RetryOnReplicaException(StreamInput in) throws IOException {
             super(in);
         }
     }
@@ -320,7 +322,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         public void onFailure(Throwable t) {
             if (t instanceof RetryOnReplicaException) {
                 logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
-                final ThreadContext threadContext = threadPool.getThreadContext();
                 final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
                 observer.waitForNextChange(new ClusterStateObserver.Listener() {
                     @Override
@@ -353,6 +354,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 }
             }
         }
+
         private void failReplicaIfNeeded(Throwable t) {
             String index = request.shardId().getIndex().getName();
             int shardId = request.shardId().id();
@@ -384,7 +386,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         @Override
         protected void doRun() throws Exception {
             assert request.shardId() != null : "request shardId must be set";
-            try (Releasable ignored = getIndexShardOperationsCounter(request.shardId())) {
+            try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId())) {
                 shardOperationOnReplica(request);
                 if (logger.isTraceEnabled()) {
                     logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request);
@@ -400,7 +402,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             setShard(shardId);
         }

-        public RetryOnPrimaryException(StreamInput in) throws IOException{
+        public RetryOnPrimaryException(StreamInput in) throws IOException {
             super(in);
         }
     }
@@ -446,6 +448,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 handleBlockException(blockException);
                 return;
             }
+            // request does not have a shardId yet, we need to pass the concrete index to resolve shardId
             resolveRequest(state.metaData(), concreteIndex, request);
             assert request.shardId() != null : "request shardId must be set in resolveRequest";
@@ -469,6 +472,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 }
                 performAction(node, transportPrimaryAction, true);
             } else {
+                if (state.version() < request.routedBasedOnClusterVersion()) {
+                    logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
+                    retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
+                    return;
+                } else {
+                    // chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
+                    // this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
+                    request.routedBasedOnClusterVersion(state.version());
+                }
                 if (logger.isTraceEnabled()) {
                     logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
                 }
@@ -528,7 +540,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 finishAsFailed(failure);
                 return;
             }
-            final ThreadContext threadContext = threadPool.getThreadContext();
             final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
             observer.waitForNextChange(new ClusterStateObserver.Listener() {
                 @Override
@@ -586,47 +597,29 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     }

     /**
-     * Responsible for performing primary operation locally and delegating to replication action once successful
+     * Responsible for performing primary operation locally or delegating primary operation to relocation target in case where shard has
+     * been marked as RELOCATED. Delegates to replication action once successful.
      * <p>
      * Note that as soon as we move to replication action, state responsibility is transferred to {@link ReplicationPhase}.
      */
-    final class PrimaryPhase extends AbstractRunnable {
+    class PrimaryPhase extends AbstractRunnable {
         private final Request request;
+        private final ShardId shardId;
         private final TransportChannel channel;
         private final ClusterState state;
         private final AtomicBoolean finished = new AtomicBoolean();
-        private Releasable indexShardReference;
+        private IndexShardReference indexShardReference;

         PrimaryPhase(Request request, TransportChannel channel) {
             this.state = clusterService.state();
             this.request = request;
+            assert request.shardId() != null : "request shardId must be set prior to primary phase";
+            this.shardId = request.shardId();
             this.channel = channel;
         }

         @Override
         public void onFailure(Throwable e) {
-            finishAsFailed(e);
-        }
-
-        @Override
-        protected void doRun() throws Exception {
-            // request shardID was set in ReroutePhase
-            assert request.shardId() != null : "request shardID must be set prior to primary phase";
-            final ShardId shardId = request.shardId();
-            final String writeConsistencyFailure = checkWriteConsistency(shardId);
-            if (writeConsistencyFailure != null) {
-                finishBecauseUnavailable(shardId, writeConsistencyFailure);
-                return;
-            }
-            final ReplicationPhase replicationPhase;
-            try {
-                indexShardReference = getIndexShardOperationsCounter(shardId);
-                Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
-                if (logger.isTraceEnabled()) {
-                    logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
-                }
-                replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
-            } catch (Throwable e) {
             if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
                 if (logger.isTraceEnabled()) {
                     logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
@@ -637,9 +630,38 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 }
             }
             finishAsFailed(e);
+        }
+
+        @Override
+        protected void doRun() throws Exception {
+            // request shardID was set in ReroutePhase
+            final String writeConsistencyFailure = checkWriteConsistency(shardId);
+            if (writeConsistencyFailure != null) {
+                finishBecauseUnavailable(shardId, writeConsistencyFailure);
                 return;
             }
+            // closed in finishAsFailed(e) in the case of error
+            indexShardReference = getIndexShardReferenceOnPrimary(shardId);
+            if (indexShardReference.isRelocated() == false) {
+                // execute locally
+                Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
+                if (logger.isTraceEnabled()) {
+                    logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
+                }
+                ReplicationPhase replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
                 finishAndMoveToReplication(replicationPhase);
+            } else {
+                // delegate primary phase to relocation target
+                // it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
+                // phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
+                final ShardRouting primary = indexShardReference.routingEntry();
+                indexShardReference.close();
+                assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
+                DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
+                transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
+                    TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
+                        "rerouting indexing to target primary " + primary));
+            }
         }

     /**
@@ -723,10 +745,24 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         }
     }

-    protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
+    /**
+     * returns a new reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
+     * and replication of the operation to all replica shards is completed / failed (see {@link ReplicationPhase}).
+     */
+    protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
         IndexShard indexShard = indexService.getShard(shardId.id());
-        return new IndexShardReference(indexShard);
+        return new IndexShardReferenceImpl(indexShard, true);
+    }
+
+    /**
+     * returns a new reference to {@link IndexShard} on a node that the request is replicated to. The reference is closed as soon as
+     * replication is completed on the node.
+     */
+    protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
+        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        IndexShard indexShard = indexService.getShard(shardId.id());
+        return new IndexShardReferenceImpl(indexShard, false);
     }

     /**
@@ -742,16 +778,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         private final List<ShardRouting> shards;
         private final DiscoveryNodes nodes;
         private final boolean executeOnReplica;
-        private final String indexUUID;
         private final AtomicBoolean finished = new AtomicBoolean();
         private final AtomicInteger success = new AtomicInteger(1); // We already wrote into the primary shard
         private final ConcurrentMap<String, Throwable> shardReplicaFailures = ConcurrentCollections.newConcurrentMap();
         private final AtomicInteger pending;
         private final int totalShards;
-        private final Releasable indexShardReference;
+        private final IndexShardReference indexShardReference;

         public ReplicationPhase(ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
-                                TransportChannel channel, Releasable indexShardReference) {
+                                TransportChannel channel, IndexShardReference indexShardReference) {
             this.replicaRequest = replicaRequest;
             this.channel = channel;
             this.finalResponse = finalResponse;
@@ -768,7 +803,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex());
             this.shards = (shardRoutingTable != null) ? shardRoutingTable.shards() : Collections.emptyList();
             this.executeOnReplica = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
-            this.indexUUID = (indexMetaData != null) ? indexMetaData.getIndexUUID() : null;
             this.nodes = state.getNodes();

             if (shards.isEmpty()) {
@@ -779,19 +813,22 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             int numberOfIgnoredShardInstances = 0;
             int numberOfPendingShardInstances = 0;
             for (ShardRouting shard : shards) {
+                // the following logic to select the shards to replicate to is mirrored and explained in the doRun method below
                 if (shard.primary() == false && executeOnReplica == false) {
                     numberOfIgnoredShardInstances++;
-                } else if (shard.unassigned()) {
+                    continue;
+                }
+                if (shard.unassigned()) {
                     numberOfIgnoredShardInstances++;
-                } else {
-                    if (shard.currentNodeId().equals(nodes.localNodeId()) == false) {
-                        numberOfPendingShardInstances++;
-                    }
-                    if (shard.relocating()) {
-                        numberOfPendingShardInstances++;
-                    }
+                    continue;
+                }
+                if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
+                    numberOfPendingShardInstances++;
+                }
+                if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
+                    numberOfPendingShardInstances++;
                 }
             }
             // one for the local primary copy
             this.totalShards = 1 + numberOfPendingShardInstances + numberOfIgnoredShardInstances;
             this.pending = new AtomicInteger(numberOfPendingShardInstances);
@@ -862,7 +899,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                     performOnReplica(shard);
                 }
                 // send operation to relocating shard
-                if (shard.relocating()) {
+                // local shard can be a relocation target of a primary that is in relocated state
+                if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
                     performOnReplica(shard.buildTargetRelocatingShard());
                 }
             }
@@ -901,7 +939,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 logger.warn("[{}] {}", exp, shardId, message);
                 shardStateAction.shardFailed(
                     shard,
-                    indexUUID,
+                    indexShardReference.routingEntry(),
                     message,
                     exp,
                     new ShardStateAction.Listener() {
@@ -995,21 +1033,39 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         return IndexMetaData.isIndexUsingShadowReplicas(settings) == false;
     }

-    static class IndexShardReference implements Releasable {
+    interface IndexShardReference extends Releasable {
+        boolean isRelocated();
+        ShardRouting routingEntry();
+    }

-        final private IndexShard counter;
-        private final AtomicBoolean closed = new AtomicBoolean();
+    static final class IndexShardReferenceImpl implements IndexShardReference {

-        IndexShardReference(IndexShard counter) {
-            counter.incrementOperationCounter();
-            this.counter = counter;
+        private final IndexShard indexShard;
+        private final Releasable operationLock;
+
+        IndexShardReferenceImpl(IndexShard indexShard, boolean primaryAction) {
+            this.indexShard = indexShard;
+            if (primaryAction) {
+                operationLock = indexShard.acquirePrimaryOperationLock();
+            } else {
+                operationLock = indexShard.acquireReplicaOperationLock();
+            }
         }

         @Override
         public void close() {
-            if (closed.compareAndSet(false, true)) {
-                counter.decrementOperationCounter();
-            }
+            operationLock.close();
+        }
+
+        @Override
+        public boolean isRelocated() {
+            return indexShard.state() == IndexShardState.RELOCATED;
+        }
+
+        @Override
+        public ShardRouting routingEntry() {
+            return indexShard.routingEntry();
         }
     }
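
The interface split above separates "holding the shard's operation lock" from "knowing the shard's routing state". A self-contained usage sketch (a hypothetical mirror of the interface, not the real IndexShard API) showing the two acquisition paths and the explicit close-before-delegation on a relocated primary:

// Hypothetical mini-model: the reference is acquired with the lock matching the
// role (primary vs replica) and released via close(), e.g. in try-with-resources.
interface ShardRef extends AutoCloseable {
    boolean isRelocated();
    @Override void close(); // narrowed: no checked exception
}

final class ShardReferenceDemo {
    static ShardRef acquire(boolean primaryAction, boolean relocated) {
        // the real code takes indexShard.acquirePrimaryOperationLock()
        // or indexShard.acquireReplicaOperationLock() here
        return new ShardRef() {
            @Override public boolean isRelocated() { return relocated; }
            @Override public void close() { System.out.println("lock released"); }
        };
    }

    public static void main(String[] args) {
        try (ShardRef ref = acquire(false, false)) { // replica path
            System.out.println("replica operation runs under the lock");
        }
        ShardRef primary = acquire(true, true); // primary path on a relocated shard
        if (primary.isRelocated()) {
            primary.close(); // release before delegating to the relocation target
            System.out.println("delegate primary phase to relocation target");
        }
    }
}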

View File

@@ -27,6 +27,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.cli.CliTool;
 import org.elasticsearch.common.cli.CliToolConfig;
 import org.elasticsearch.common.cli.Terminal;
+import org.elasticsearch.common.cli.UserError;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.monitor.jvm.JvmInfo;
@@ -82,7 +83,9 @@ final class BootstrapCLIParser extends CliTool {
         @Override
         public ExitStatus execute(Settings settings, Environment env) throws Exception {
-            terminal.println("Version: %s, Build: %s/%s, JVM: %s", org.elasticsearch.Version.CURRENT, Build.CURRENT.shortHash(), Build.CURRENT.date(), JvmInfo.jvmInfo().version());
+            terminal.println("Version: " + org.elasticsearch.Version.CURRENT
+                + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+                + ", JVM: " + JvmInfo.jvmInfo().version());
             return ExitStatus.OK_AND_EXIT;
         }
     }
@@ -103,7 +106,7 @@ final class BootstrapCLIParser extends CliTool {
         // TODO: don't use system properties as a way to do this, its horrible...
         @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
-        public static Command parse(Terminal terminal, CommandLine cli) {
+        public static Command parse(Terminal terminal, CommandLine cli) throws UserError {
             if (cli.hasOption("V")) {
                 return Version.parse(terminal, cli);
             }
@@ -132,11 +135,11 @@ final class BootstrapCLIParser extends CliTool {
                 String arg = iterator.next();
                 if (!arg.startsWith("--")) {
                     if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
-                        throw new IllegalArgumentException(
+                        throw new UserError(ExitStatus.USAGE,
                             "Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
                         );
                     } else {
-                        throw new IllegalArgumentException("Parameter [" + arg + "]does not start with --");
+                        throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --");
                     }
                 }
                 // if there is no = sign, we have to get the next argument
@@ -150,11 +153,11 @@ final class BootstrapCLIParser extends CliTool {
                 if (iterator.hasNext()) {
                     String value = iterator.next();
                     if (value.startsWith("--")) {
-                        throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
+                        throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
                     }
                     System.setProperty("es." + arg, value);
                 } else {
-                    throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
+                    throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
                 }
             }
         }

View File

@@ -25,7 +25,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.http.netty.NettyHttpServerTransport;
+import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.plugins.PluginInfo;
 import org.elasticsearch.transport.TransportSettings;
@@ -270,9 +270,7 @@ final class Security {
     static void addBindPermissions(Permissions policy, Settings settings) throws IOException {
         // http is simple
-        String httpRange = settings.get("http.netty.port",
-            settings.get("http.port",
-                NettyHttpServerTransport.DEFAULT_PORT_RANGE));
+        String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
         // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
         // see SocketPermission implies() code
         policy.add(new SocketPermission("*:" + httpRange, "listen,resolve"));

View File

@@ -25,7 +25,9 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.recycler.AbstractRecyclerC;
 import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -41,9 +43,13 @@ import static org.elasticsearch.common.recycler.Recyclers.none;
 /** A recycler of fixed-size pages. */
 public class PageCacheRecycler extends AbstractComponent implements Releasable {

-    public static final String TYPE = "recycler.page.type";
-    public static final String LIMIT_HEAP = "recycler.page.limit.heap";
-    public static final String WEIGHT = "recycler.page.weight";
+    public static final Setting<Type> TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER);
+    // object pages are less useful to us so we give them a lower weight by default
+    public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER);

     private final Recycler<byte[]> bytePage;
     private final Recycler<int[]> intPage;
@@ -73,8 +79,8 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
     @Inject
     public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
         super(settings);
-        final Type type = Type.parse(settings.get(TYPE));
-        final long limit = settings.getAsMemory(LIMIT_HEAP, "10%").bytes();
+        final Type type = TYPE_SETTING.get(settings);
+        final long limit = LIMIT_HEAP_SETTING.get(settings).bytes();
         final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
         final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
@@ -91,11 +97,10 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
         // to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
         // that would need to be addressed such as garbage collection of native memory or safety
         // of Unsafe writes.
-        final double bytesWeight = settings.getAsDouble(WEIGHT + ".bytes", 1d);
-        final double intsWeight = settings.getAsDouble(WEIGHT + ".ints", 1d);
-        final double longsWeight = settings.getAsDouble(WEIGHT + ".longs", 1d);
-        // object pages are less useful to us so we give them a lower weight by default
-        final double objectsWeight = settings.getAsDouble(WEIGHT + ".objects", 0.1d);
+        final double bytesWeight = WEIGHT_BYTES_SETTING.get(settings);
+        final double intsWeight = WEIGHT_INT_SETTING.get(settings);
+        final double longsWeight = WEIGHT_LONG_SETTING.get(settings);
+        final double objectsWeight = WEIGHT_OBJECTS_SETTING.get(settings);

         final double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;
         final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / BigArrays.PAGE_SIZE_IN_BYTES);
@@ -190,7 +195,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
         return recycler;
     }

-    public static enum Type {
+    public enum Type {
         QUEUE {
             @Override
             <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
@@ -211,9 +216,6 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
         };

         public static Type parse(String type) {
-            if (Strings.isNullOrEmpty(type)) {
-                return CONCURRENT;
-            }
             try {
                 return Type.valueOf(type.toUpperCase(Locale.ROOT));
             } catch (IllegalArgumentException e) {
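
The weight settings above split the heap-limited page budget proportionally: each page type gets roughly maxPageCount * weight / totalWeight pages. A worked example with assumed numbers (the 16 KB page size matches BigArrays' fixed page size; the 100 MB limit is made up for illustration):

// Worked example of the weight arithmetic used in the constructor above.
final class PageBudgetDemo {
    public static void main(String[] args) {
        long limitBytes = 100L * 1024 * 1024;   // assume a 100 MB heap limit
        int pageSizeInBytes = 16 * 1024;        // 16 KB pages
        int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limitBytes / pageSizeInBytes);

        double bytesWeight = 1d, intsWeight = 1d, longsWeight = 1d, objectsWeight = 0.1d;
        double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;

        System.out.println("total pages:  " + maxPageCount);                                   // 6400
        System.out.println("byte pages:   " + (int) (maxPageCount * bytesWeight / totalWeight));   // ~2064
        System.out.println("object pages: " + (int) (maxPageCount * objectsWeight / totalWeight)); // ~206
    }
}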

View File

@@ -123,6 +123,11 @@ public interface ClusterStateTaskExecutor<T> {
             return this == SUCCESS;
         }

+        public Throwable getFailure() {
+            assert !isSuccess();
+            return failure;
+        }
+
         /**
          * Handle the execution result with the provided consumers
          * @param onSuccess handler to invoke on success

View File

@@ -94,7 +94,7 @@ public class MappingUpdatedAction extends AbstractComponent {
         }
     }

-    public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
+    public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
         updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null);
     }

@@ -102,7 +102,7 @@ public class MappingUpdatedAction extends AbstractComponent {
      * Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)}
      * using the default timeout.
      */
-    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
+    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
         updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
     }

@@ -111,7 +111,7 @@ public class MappingUpdatedAction extends AbstractComponent {
      * {@code timeout}. When this method returns successfully mappings have
      * been applied to the master node and propagated to data nodes.
      */
-    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable {
+    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
         if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
             throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
         }
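Narrowing the throws clause from Throwable to Exception lets callers use an ordinary try/catch. A hedged sketch (the mappingUpdatedAction instance and the index/type/mappingUpdate values are assumed):

    try {
        mappingUpdatedAction.updateMappingOnMasterSynchronously(index, type, mappingUpdate);
    } catch (Exception e) {
        // a TimeoutException here means the master did not acknowledge the
        // mapping update within the default timeout
        throw new RuntimeException("mapping update failed for [" + index + "]", e);
    }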

View File

@@ -19,6 +19,7 @@

 package org.elasticsearch.cluster.action.shard;

+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
@@ -28,8 +29,9 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.ClusterStateTaskListener;
 import org.elasticsearch.cluster.MasterNodeChangePredicate;
 import org.elasticsearch.cluster.NotMasterException;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingService;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -46,6 +48,7 @@ import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.ConnectTransportException;
@@ -60,6 +63,7 @@ import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -125,17 +129,22 @@ public class ShardStateAction extends AbstractComponent {
         return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null;
     }

-    public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
+    /**
+     * Send a shard failed request to the master node to update the
+     * cluster state.
+     *
+     * @param shardRouting       the shard to fail
+     * @param sourceShardRouting the source shard requesting the failure (must be the shard itself, or the primary shard)
+     * @param message            the reason for the failure
+     * @param failure            the underlying cause of the failure
+     * @param listener           callback upon completion of the request
+     */
+    public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Throwable failure, Listener listener) {
         ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
-        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
+        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, sourceShardRouting, message, failure);
         sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener);
     }

-    public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
-        logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
-        shardFailed(shardRouting, indexUUID, message, failure, listener);
-    }
-
     // visible for testing
     protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) {
         observer.waitForNextChange(new ClusterStateObserver.Listener() {
@@ -231,15 +240,15 @@ public class ShardStateAction extends AbstractComponent {
             // partition tasks into those that correspond to shards
             // that exist versus do not exist
-            Map<Boolean, List<ShardRoutingEntry>> partition =
-                tasks.stream().collect(Collectors.partitioningBy(task -> shardExists(currentState, task)));
+            Map<ValidationResult, List<ShardRoutingEntry>> partition =
+                tasks.stream().collect(Collectors.groupingBy(task -> validateTask(currentState, task)));

             // tasks that correspond to non-existent shards are marked
             // as successful
-            batchResultBuilder.successes(partition.get(false));
+            batchResultBuilder.successes(partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList()));

             ClusterState maybeUpdatedState = currentState;
-            List<ShardRoutingEntry> tasksToFail = partition.get(true);
+            List<ShardRoutingEntry> tasksToFail = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList());
             try {
                 List<FailedRerouteAllocation.FailedShard> failedShards =
                     tasksToFail
@@ -257,6 +266,15 @@ public class ShardStateAction extends AbstractComponent {
                 batchResultBuilder.failures(tasksToFail, t);
             }

+            partition
+                .getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList())
+                .forEach(task -> batchResultBuilder.failure(
+                    task,
+                    new NoLongerPrimaryShardException(
+                        task.getShardRouting().shardId(),
+                        "source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation")
+                ));
+
             return batchResultBuilder.build(maybeUpdatedState);
         }

@@ -265,17 +283,36 @@ public class ShardStateAction extends AbstractComponent {
             return allocationService.applyFailedShards(currentState, failedShards);
         }

-        private boolean shardExists(ClusterState currentState, ShardRoutingEntry task) {
+        private enum ValidationResult {
+            VALID,
+            SOURCE_INVALID,
+            SHARD_MISSING
+        }
+
+        private ValidationResult validateTask(ClusterState currentState, ShardRoutingEntry task) {
+            // non-local requests
+            if (!task.shardRouting.isSameAllocation(task.sourceShardRouting)) {
+                IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(task.shardRouting.shardId());
+                if (indexShard == null) {
+                    return ValidationResult.SOURCE_INVALID;
+                }
+                ShardRouting primaryShard = indexShard.primaryShard();
+                if (primaryShard == null || !primaryShard.isSameAllocation(task.sourceShardRouting)) {
+                    return ValidationResult.SOURCE_INVALID;
+                }
+            }
+
             RoutingNodes.RoutingNodeIterator routingNodeIterator =
                 currentState.getRoutingNodes().routingNodeIter(task.getShardRouting().currentNodeId());
             if (routingNodeIterator != null) {
                 for (ShardRouting maybe : routingNodeIterator) {
                     if (task.getShardRouting().isSameAllocation(maybe)) {
-                        return true;
+                        return ValidationResult.VALID;
                     }
                 }
             }
-            return false;
+            return ValidationResult.SHARD_MISSING;
         }

         @Override
@@ -291,9 +328,9 @@ public class ShardStateAction extends AbstractComponent {
         }
     }

-    public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String message, Listener listener) {
+    public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) {
         ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
-        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, null);
+        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, shardRouting, message, null);
         sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener);
     }

@@ -360,16 +397,16 @@ public class ShardStateAction extends AbstractComponent {
     public static class ShardRoutingEntry extends TransportRequest {
         ShardRouting shardRouting;
-        String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
+        ShardRouting sourceShardRouting;
         String message;
         Throwable failure;

         public ShardRoutingEntry() {
         }

-        ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) {
+        ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Throwable failure) {
             this.shardRouting = shardRouting;
-            this.indexUUID = indexUUID;
+            this.sourceShardRouting = sourceShardRouting;
             this.message = message;
             this.failure = failure;
         }

@@ -382,7 +419,7 @@ public class ShardStateAction extends AbstractComponent {
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             shardRouting = readShardRoutingEntry(in);
-            indexUUID = in.readString();
+            sourceShardRouting = readShardRoutingEntry(in);
             message = in.readString();
             failure = in.readThrowable();
         }
@@ -391,18 +428,25 @@ public class ShardStateAction extends AbstractComponent {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             shardRouting.writeTo(out);
-            out.writeString(indexUUID);
+            sourceShardRouting.writeTo(out);
             out.writeString(message);
             out.writeThrowable(failure);
         }

         @Override
         public String toString() {
-            return "" + shardRouting + ", indexUUID [" + indexUUID + "], message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]";
+            return String.format(
+                Locale.ROOT,
+                "failed shard [%s], source shard [%s], message [%s], failure [%s]",
+                shardRouting,
+                sourceShardRouting,
+                message,
+                ExceptionsHelper.detailedMessage(failure));
         }
     }

     public interface Listener {

         default void onSuccess() {
         }

@@ -423,6 +467,20 @@ public class ShardStateAction extends AbstractComponent {
          */
         default void onFailure(final Throwable t) {
         }
+    }
+
+    public static class NoLongerPrimaryShardException extends ElasticsearchException {
+
+        public NoLongerPrimaryShardException(ShardId shardId, String msg) {
+            super(msg);
+            setShard(shardId);
+        }
+
+        public NoLongerPrimaryShardException(StreamInput in) throws IOException {
+            super(in);
+        }
     }
 }
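A hypothetical call-site sketch for the new shardFailed signature: a shard failing itself passes its own routing as sourceShardRouting, while a primary failing a replica would pass the primary's routing, which validateTask() checks against the routing table (failedRouting, cause and shardStateAction are assumed names):

    shardStateAction.shardFailed(failedRouting, failedRouting, "engine failure", cause,
        new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                // the master acknowledged and applied the failure
            }

            @Override
            public void onFailure(Throwable t) {
                // e.g. NoLongerPrimaryShardException when the reporting primary was demoted
            }
        });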

View File

@@ -43,6 +43,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.function.Predicate;

 /**
@@ -137,6 +138,13 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
         return shard;
     }

+    public IndexShardRoutingTable shardRoutingTableOrNull(ShardId shardId) {
+        return Optional
+            .ofNullable(index(shardId.getIndexName()))
+            .flatMap(irt -> Optional.ofNullable(irt.shard(shardId.getId())))
+            .orElse(null);
+    }
+
     public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
         RoutingTableValidation validation = validate(metaData);
         if (!validation.valid()) {
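shardRoutingTableOrNull() gives callers a null result instead of an exception when the index or shard is gone; validateTask() above uses it exactly this way. A short sketch (clusterState and shardId are assumed):

    IndexShardRoutingTable table = clusterState.getRoutingTable().shardRoutingTableOrNull(shardId);
    if (table == null) {
        // index or shard no longer exists; treat as a non-error case
    } else {
        ShardRouting primary = table.primaryShard(); // may still be null if unassigned
    }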

View File

@@ -100,8 +100,9 @@ public abstract class CheckFileCommand extends CliTool.Command {
                 Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
                 Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
                 if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
-                    terminal.printWarn("The file permissions of [%s] have changed from [%s] to [%s]",
-                        entry.getKey(), PosixFilePermissions.toString(permissionsBeforeWrite), PosixFilePermissions.toString(permissionsAfterWrite));
+                    terminal.printWarn("The file permissions of [" + entry.getKey() + "] have changed "
+                        + "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
+                        + "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
                     terminal.printWarn("Please ensure that the user account running Elasticsearch has read access to this file!");
                 }
             }
@@ -115,7 +116,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
                 String ownerBeforeWrite = entry.getValue();
                 String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
                 if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
-                    terminal.printWarn("WARN: Owner of file [%s] used to be [%s], but now is [%s]", entry.getKey(), ownerBeforeWrite, ownerAfterWrite);
+                    terminal.printWarn("WARN: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
                 }
             }
@@ -128,7 +129,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
                 String groupBeforeWrite = entry.getValue();
                 String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
                 if (!groupAfterWrite.equals(groupBeforeWrite)) {
-                    terminal.printWarn("WARN: Group of file [%s] used to be [%s], but now is [%s]", entry.getKey(), groupBeforeWrite, groupAfterWrite);
+                    terminal.printWarn("WARN: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
                 }
             }

View File

@@ -19,14 +19,17 @@

 package org.elasticsearch.common.cli;

+import org.apache.commons.cli.AlreadySelectedException;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.MissingOptionException;
+import org.apache.commons.cli.UnrecognizedOptionException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;

-import java.io.IOException;
 import java.util.Locale;

 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@@ -50,7 +53,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 public abstract class CliTool {

     // based on sysexits.h
-    public static enum ExitStatus {
+    public enum ExitStatus {
         OK(0),
         OK_AND_EXIT(0),
         USAGE(64),          /* command line usage error */
@@ -69,23 +72,13 @@ public abstract class CliTool {

         final int status;

-        private ExitStatus(int status) {
+        ExitStatus(int status) {
             this.status = status;
         }

         public int status() {
             return status;
         }
-
-        public static ExitStatus fromStatus(int status) {
-            for (ExitStatus exitStatus : values()) {
-                if (exitStatus.status() == status) {
-                    return exitStatus;
-                }
-            }
-            return null;
-        }
     }

     protected final Terminal terminal;
@@ -108,7 +101,7 @@ public abstract class CliTool {
         settings = env.settings();
     }

-    public final ExitStatus execute(String... args) {
+    public final ExitStatus execute(String... args) throws Exception {

         // first lets see if the user requests tool help. We're doing it only if
         // this is a multi-command tool. If it's a single command tool, the -h/--help
@@ -132,7 +125,7 @@ public abstract class CliTool {
             String cmdName = args[0];
             cmd = config.cmd(cmdName);
             if (cmd == null) {
-                terminal.printError("unknown command [%s]. Use [-h] option to list available commands", cmdName);
+                terminal.printError("unknown command [" + cmdName + "]. Use [-h] option to list available commands");
                 return ExitStatus.USAGE;
             }

@@ -146,23 +139,11 @@ public abstract class CliTool {
             }
         }

-        Command command = null;
         try {
-            command = parse(cmd, args);
-            return command.execute(settings, env);
-        } catch (IOException ioe) {
-            terminal.printError(ioe);
-            return ExitStatus.IO_ERROR;
-        } catch (IllegalArgumentException ilae) {
-            terminal.printError(ilae);
-            return ExitStatus.USAGE;
-        } catch (Throwable t) {
-            terminal.printError(t);
-            if (command == null) {
-                return ExitStatus.USAGE;
-            }
-            return ExitStatus.CODE_ERROR;
+            return parse(cmd, args).execute(settings, env);
+        } catch (UserError error) {
+            terminal.printError(error.getMessage());
+            return error.exitStatus;
         }
     }

@@ -177,7 +158,13 @@ public abstract class CliTool {
         if (cli.hasOption("h")) {
             return helpCmd(cmd);
         }
+        try {
             cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
+        } catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) {
+            // intentionally drop the stack trace here as these are really user errors,
+            // the stack trace into cli parsing lib is not important
+            throw new UserError(ExitStatus.USAGE, e.toString());
+        }
         Terminal.Verbosity verbosity = Terminal.Verbosity.resolve(cli);
         terminal.verbosity(verbosity);
         return parse(cmd.name(), cli);
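A minimal sketch of the new error path: command code signals a user mistake by throwing UserError, and execute() above turns it into a printed message plus the carried exit status, with no stack trace (unknownFlag is a hypothetical variable):

    // Inside a Command's parsing/validation logic:
    if (unknownFlag != null) {
        throw new UserError(CliTool.ExitStatus.USAGE, "unrecognized option [" + unknownFlag + "]");
    }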

View File

@@ -50,7 +50,7 @@ public class HelpPrinter {
                 }
             });
         } catch (IOException ioe) {
-            ioe.printStackTrace(terminal.writer());
+            throw new RuntimeException(ioe);
         }
         terminal.println();
     }

View File

@@ -35,8 +35,6 @@ import java.util.Locale;
 @SuppressForbidden(reason = "System#out")
 public abstract class Terminal {

-    public static final String DEBUG_SYSTEM_PROPERTY = "es.cli.debug";
-
     public static final Terminal DEFAULT = ConsoleTerminal.supported() ? new ConsoleTerminal() : new SystemTerminal();

     public static enum Verbosity {
@@ -64,7 +62,6 @@ public abstract class Terminal {
     }

     private Verbosity verbosity = Verbosity.NORMAL;
-    private final boolean isDebugEnabled;

     public Terminal() {
         this(Verbosity.NORMAL);
@@ -72,7 +69,6 @@ public abstract class Terminal {

     public Terminal(Verbosity verbosity) {
         this.verbosity = verbosity;
-        this.isDebugEnabled = "true".equals(System.getProperty(DEBUG_SYSTEM_PROPERTY, "false"));
     }

     public void verbosity(Verbosity verbosity) {
@@ -93,46 +89,37 @@ public abstract class Terminal {
         println(Verbosity.NORMAL);
     }

-    public void println(String msg, Object... args) {
-        println(Verbosity.NORMAL, msg, args);
+    public void println(String msg) {
+        println(Verbosity.NORMAL, msg);
     }

-    public void print(String msg, Object... args) {
-        print(Verbosity.NORMAL, msg, args);
+    public void print(String msg) {
+        print(Verbosity.NORMAL, msg);
     }

     public void println(Verbosity verbosity) {
         println(verbosity, "");
     }

-    public void println(Verbosity verbosity, String msg, Object... args) {
-        print(verbosity, msg + System.lineSeparator(), args);
+    public void println(Verbosity verbosity, String msg) {
+        print(verbosity, msg + System.lineSeparator());
     }

-    public void print(Verbosity verbosity, String msg, Object... args) {
+    public void print(Verbosity verbosity, String msg) {
         if (this.verbosity.enabled(verbosity)) {
-            doPrint(msg, args);
+            doPrint(msg);
         }
     }

-    public void printError(String msg, Object... args) {
-        println(Verbosity.SILENT, "ERROR: " + msg, args);
+    public void printError(String msg) {
+        println(Verbosity.SILENT, "ERROR: " + msg);
     }

-    public void printError(Throwable t) {
-        printError("%s", t.toString());
-        if (isDebugEnabled) {
-            printStackTrace(t);
-        }
+    public void printWarn(String msg) {
+        println(Verbosity.SILENT, "WARN: " + msg);
     }

-    public void printWarn(String msg, Object... args) {
-        println(Verbosity.SILENT, "WARN: " + msg, args);
-    }
-
-    protected abstract void doPrint(String msg, Object... args);
-
-    public abstract PrintWriter writer();
+    protected abstract void doPrint(String msg);

     private static class ConsoleTerminal extends Terminal {
@@ -143,8 +130,8 @@ public abstract class Terminal {
         }

         @Override
-        public void doPrint(String msg, Object... args) {
-            console.printf(msg, args);
+        public void doPrint(String msg) {
+            console.printf("%s", msg);
             console.flush();
         }
@@ -158,11 +145,6 @@ public abstract class Terminal {
             return console.readPassword(text, args);
         }

-        @Override
-        public PrintWriter writer() {
-            return console.writer();
-        }
-
         @Override
         public void printStackTrace(Throwable t) {
             t.printStackTrace(console.writer());
@@ -175,13 +157,13 @@ public abstract class Terminal {
         private final PrintWriter printWriter = new PrintWriter(System.out);

         @Override
-        public void doPrint(String msg, Object... args) {
-            System.out.print(String.format(Locale.ROOT, msg, args));
+        public void doPrint(String msg) {
+            System.out.print(msg);
         }

         @Override
         public String readText(String text, Object... args) {
-            print(text, args);
+            print(text);
             BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
             try {
                 return reader.readLine();
@@ -199,10 +181,5 @@ public abstract class Terminal {
         public void printStackTrace(Throwable t) {
             t.printStackTrace(printWriter);
         }
-
-        @Override
-        public PrintWriter writer() {
-            return printWriter;
-        }
     }
 }
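With the varargs overloads removed, callers format messages themselves, which eliminates accidental printf-style format-string bugs. A sketch of the narrowed API (terminal, count, cmdName and path are assumed):

    terminal.println("Downloaded " + count + " files");       // was: println("Downloaded %s files", count)
    terminal.printError("unknown command [" + cmdName + "]"); // printed at SILENT verbosity with an "ERROR: " prefix
    terminal.printWarn("config file [" + path + "] is world-readable");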
} }

View File

@@ -17,19 +17,19 @@
  * under the License.
  */

-package org.elasticsearch.plugins.loading.classpath;
+package org.elasticsearch.common.cli;

-import org.elasticsearch.plugins.Plugin;
+/**
+ * An exception representing a user fixable problem in {@link CliTool} usage.
+ */
+public class UserError extends Exception {

-public class InClassPathPlugin extends Plugin {
+    /** The exist status the cli should use when catching this user error. */
+    public final CliTool.ExitStatus exitStatus;

-    @Override
-    public String name() {
-        return "in-classpath-plugin";
-    }
+    /** Constructs a UserError with an exit status and message to show the user. */
+    public UserError(CliTool.ExitStatus exitStatus, String msg) {
+        super(msg);
+        this.exitStatus = exitStatus;
+    }

-    @Override
-    public String description() {
-        return "A plugin defined in class path";
-    }
 }

View File

@@ -1,488 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.http.client;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchCorruptionException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.unit.TimeValue;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;
import java.util.List;
/**
*
*/
public class HttpDownloadHelper {
private boolean useTimestamp = false;
private boolean skipExisting = false;
public boolean download(URL source, Path dest, @Nullable DownloadProgress progress, TimeValue timeout) throws Exception {
if (Files.exists(dest) && skipExisting) {
return true;
}
//don't do any progress, unless asked
if (progress == null) {
progress = new NullProgress();
}
//set the timestamp to the file date.
long timestamp = 0;
boolean hasTimestamp = false;
if (useTimestamp && Files.exists(dest) ) {
timestamp = Files.getLastModifiedTime(dest).toMillis();
hasTimestamp = true;
}
GetThread getThread = new GetThread(source, dest, hasTimestamp, timestamp, progress);
try {
getThread.setDaemon(true);
getThread.start();
getThread.join(timeout.millis());
if (getThread.isAlive()) {
throw new ElasticsearchTimeoutException("The GET operation took longer than " + timeout + ", stopping it.");
}
}
catch (InterruptedException ie) {
return false;
} finally {
getThread.closeStreams();
}
return getThread.wasSuccessful();
}
public interface Checksummer {
/** Return the hex string for the given byte array */
String checksum(byte[] filebytes);
/** Human-readable name for the checksum format */
String name();
}
/** Checksummer for SHA1 */
public static Checksummer SHA1_CHECKSUM = new Checksummer() {
@Override
public String checksum(byte[] filebytes) {
return MessageDigests.toHexString(MessageDigests.sha1().digest(filebytes));
}
@Override
public String name() {
return "SHA1";
}
};
/** Checksummer for MD5 */
public static Checksummer MD5_CHECKSUM = new Checksummer() {
@Override
public String checksum(byte[] filebytes) {
return MessageDigests.toHexString(MessageDigests.md5().digest(filebytes));
}
@Override
public String name() {
return "MD5";
}
};
/**
* Download the given checksum URL to the destination and check the checksum
* @param checksumURL URL for the checksum file
* @param originalFile original file to calculate checksum of
* @param checksumFile destination to download the checksum file to
* @param hashFunc class used to calculate the checksum of the file
* @return true if the checksum was validated, false if it did not exist
* @throws Exception if the checksum failed to match
*/
public boolean downloadAndVerifyChecksum(URL checksumURL, Path originalFile, Path checksumFile,
@Nullable DownloadProgress progress,
TimeValue timeout, Checksummer hashFunc) throws Exception {
try {
if (download(checksumURL, checksumFile, progress, timeout)) {
byte[] fileBytes = Files.readAllBytes(originalFile);
List<String> checksumLines = Files.readAllLines(checksumFile, StandardCharsets.UTF_8);
if (checksumLines.size() != 1) {
throw new ElasticsearchCorruptionException("invalid format for checksum file (" +
hashFunc.name() + "), expected 1 line, got: " + checksumLines.size());
}
String checksumHex = checksumLines.get(0);
String fileHex = hashFunc.checksum(fileBytes);
if (fileHex.equals(checksumHex) == false) {
throw new ElasticsearchCorruptionException("incorrect hash (" + hashFunc.name() +
"), file hash: [" + fileHex + "], expected: [" + checksumHex + "]");
}
return true;
}
} catch (FileNotFoundException | NoSuchFileException e) {
// checksum file doesn't exist
return false;
} finally {
IOUtils.deleteFilesIgnoringExceptions(checksumFile);
}
return false;
}
/**
* Interface implemented for reporting
* progress of downloading.
*/
public interface DownloadProgress {
/**
* begin a download
*/
void beginDownload();
/**
* tick handler
*/
void onTick();
/**
* end a download
*/
void endDownload();
}
/**
* do nothing with progress info
*/
public static class NullProgress implements DownloadProgress {
/**
* begin a download
*/
@Override
public void beginDownload() {
}
/**
* tick handler
*/
@Override
public void onTick() {
}
/**
* end a download
*/
@Override
public void endDownload() {
}
}
/**
* verbose progress system prints to some output stream
*/
public static class VerboseProgress implements DownloadProgress {
private int dots = 0;
// CheckStyle:VisibilityModifier OFF - bc
PrintWriter writer;
// CheckStyle:VisibilityModifier ON
/**
* Construct a verbose progress reporter.
*
* @param writer the output stream.
*/
public VerboseProgress(PrintWriter writer) {
this.writer = writer;
}
/**
* begin a download
*/
@Override
public void beginDownload() {
writer.print("Downloading ");
dots = 0;
}
/**
* tick handler
*/
@Override
public void onTick() {
writer.print(".");
if (dots++ > 50) {
writer.flush();
dots = 0;
}
}
/**
* end a download
*/
@Override
public void endDownload() {
writer.println("DONE");
writer.flush();
}
}
private class GetThread extends Thread {
private final URL source;
private final Path dest;
private final boolean hasTimestamp;
private final long timestamp;
private final DownloadProgress progress;
private boolean success = false;
private IOException ioexception = null;
private InputStream is = null;
private OutputStream os = null;
private URLConnection connection;
private int redirections = 0;
GetThread(URL source, Path dest, boolean h, long t, DownloadProgress p) {
this.source = source;
this.dest = dest;
hasTimestamp = h;
timestamp = t;
progress = p;
}
@Override
public void run() {
try {
success = get();
} catch (IOException ioex) {
ioexception = ioex;
}
}
private boolean get() throws IOException {
connection = openConnection(source);
if (connection == null) {
return false;
}
boolean downloadSucceeded = downloadFile();
//if (and only if) the use file time option is set, then
//the saved file now has its timestamp set to that of the
//downloaded file
if (downloadSucceeded && useTimestamp) {
updateTimeStamp();
}
return downloadSucceeded;
}
private boolean redirectionAllowed(URL aSource, URL aDest) throws IOException {
// Argh, github does this...
// if (!(aSource.getProtocol().equals(aDest.getProtocol()) || ("http"
// .equals(aSource.getProtocol()) && "https".equals(aDest
// .getProtocol())))) {
// String message = "Redirection detected from "
// + aSource.getProtocol() + " to " + aDest.getProtocol()
// + ". Protocol switch unsafe, not allowed.";
// throw new IOException(message);
// }
redirections++;
if (redirections > 5) {
String message = "More than " + 5 + " times redirected, giving up";
throw new IOException(message);
}
return true;
}
private URLConnection openConnection(URL aSource) throws IOException {
// set up the URL connection
URLConnection connection = aSource.openConnection();
// modify the headers
// NB: things like user authentication could go in here too.
if (hasTimestamp) {
connection.setIfModifiedSince(timestamp);
}
// in case the plugin manager is its own project, this can become an authenticator
boolean isSecureProcotol = "https".equalsIgnoreCase(aSource.getProtocol());
boolean isAuthInfoSet = !Strings.isNullOrEmpty(aSource.getUserInfo());
if (isAuthInfoSet) {
if (!isSecureProcotol) {
throw new IOException("Basic auth is only supported for HTTPS!");
}
String basicAuth = Base64.encodeBytes(aSource.getUserInfo().getBytes(StandardCharsets.UTF_8));
connection.setRequestProperty("Authorization", "Basic " + basicAuth);
}
if (connection instanceof HttpURLConnection) {
((HttpURLConnection) connection).setInstanceFollowRedirects(false);
connection.setUseCaches(true);
connection.setConnectTimeout(5000);
}
connection.setRequestProperty("ES-Version", Version.CURRENT.toString());
connection.setRequestProperty("ES-Build-Hash", Build.CURRENT.shortHash());
connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager");
// connect to the remote site (may take some time)
connection.connect();
// First check on a 301 / 302 (moved) response (HTTP only)
if (connection instanceof HttpURLConnection) {
HttpURLConnection httpConnection = (HttpURLConnection) connection;
int responseCode = httpConnection.getResponseCode();
if (responseCode == HttpURLConnection.HTTP_MOVED_PERM ||
responseCode == HttpURLConnection.HTTP_MOVED_TEMP ||
responseCode == HttpURLConnection.HTTP_SEE_OTHER) {
String newLocation = httpConnection.getHeaderField("Location");
URL newURL = new URL(newLocation);
if (!redirectionAllowed(aSource, newURL)) {
return null;
}
return openConnection(newURL);
}
// next test for a 304 result (HTTP only)
long lastModified = httpConnection.getLastModified();
if (responseCode == HttpURLConnection.HTTP_NOT_MODIFIED
|| (lastModified != 0 && hasTimestamp && timestamp >= lastModified)) {
// not modified so no file download. just return
// instead and trace out something so the user
// doesn't think that the download happened when it
// didn't
return null;
}
// test for 401 result (HTTP only)
if (responseCode == HttpURLConnection.HTTP_UNAUTHORIZED) {
String message = "HTTP Authorization failure";
throw new IOException(message);
}
}
//REVISIT: at this point even non HTTP connections may
//support the if-modified-since behaviour -we just check
//the date of the content and skip the write if it is not
//newer. Some protocols (FTP) don't include dates, of
//course.
return connection;
}
private boolean downloadFile() throws FileNotFoundException, IOException {
IOException lastEx = null;
for (int i = 0; i < 3; i++) {
// this three attempt trick is to get round quirks in different
// Java implementations. Some of them take a few goes to bind
// property; we ignore the first couple of such failures.
try {
is = connection.getInputStream();
break;
} catch (IOException ex) {
lastEx = ex;
}
}
if (is == null) {
throw lastEx;
}
os = Files.newOutputStream(dest);
progress.beginDownload();
boolean finished = false;
try {
byte[] buffer = new byte[1024 * 100];
int length;
while (!isInterrupted() && (length = is.read(buffer)) >= 0) {
os.write(buffer, 0, length);
progress.onTick();
}
finished = !isInterrupted();
} finally {
if (!finished) {
// we have started to (over)write dest, but failed.
// Try to delete the garbage we'd otherwise leave
// behind.
IOUtils.closeWhileHandlingException(os, is);
IOUtils.deleteFilesIgnoringExceptions(dest);
} else {
IOUtils.close(os, is);
}
}
progress.endDownload();
return true;
}
private void updateTimeStamp() throws IOException {
long remoteTimestamp = connection.getLastModified();
if (remoteTimestamp != 0) {
Files.setLastModifiedTime(dest, FileTime.fromMillis(remoteTimestamp));
}
}
/**
* Has the download completed successfully?
* <p>
* Re-throws any exception caught during executaion.</p>
*/
boolean wasSuccessful() throws IOException {
if (ioexception != null) {
throw ioexception;
}
return success;
}
/**
* Closes streams, interrupts the download, may delete the
* output file.
*/
void closeStreams() throws IOException {
interrupt();
if (success) {
IOUtils.close(is, os);
} else {
IOUtils.closeWhileHandlingException(is, os);
if (dest != null && Files.exists(dest)) {
IOUtils.deleteFilesIgnoringExceptions(dest);
}
}
}
}
}

View File

@@ -52,33 +52,6 @@ public final class FileSystemUtils {

    private FileSystemUtils() {} // only static methods
/**
* Returns <code>true</code> iff a file under the given root has one of the given extensions. This method
* will travers directories recursively and will terminate once any of the extensions was found. This
* methods will not follow any links.
*
* @param root the root directory to travers. Must be a directory
* @param extensions the file extensions to look for
* @return <code>true</code> iff a file under the given root has one of the given extensions, otherwise <code>false</code>
* @throws IOException if an IOException occurs or if the given root path is not a directory.
*/
public static boolean hasExtensions(Path root, final String... extensions) throws IOException {
final AtomicBoolean retVal = new AtomicBoolean(false);
Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
for (String extension : extensions) {
if (file.getFileName().toString().endsWith(extension)) {
retVal.set(true);
return FileVisitResult.TERMINATE;
}
}
return super.visitFile(file, attrs);
}
});
return retVal.get();
}
    /**
     * Returns <code>true</code> iff one of the files exists otherwise <code>false</code>
     */
@@ -168,167 +141,6 @@ public final class FileSystemUtils {
        return new BufferedReader(reader);
    }
/**
* This utility copy a full directory content (excluded) under
* a new directory but without overwriting existing files.
*
* When a file already exists in destination dir, the source file is copied under
* destination directory but with a suffix appended if set or source file is ignored
* if suffix is not set (null).
* @param source Source directory (for example /tmp/es/src)
* @param destination Destination directory (destination directory /tmp/es/dst)
* @param suffix When not null, files are copied with a suffix appended to the original name (eg: ".new")
* When null, files are ignored
*/
public static void moveFilesWithoutOverwriting(Path source, final Path destination, final String suffix) throws IOException {
// Create destination dir
Files.createDirectories(destination);
final int configPathRootLevel = source.getNameCount();
// We walk through the file tree from
Files.walkFileTree(source, new SimpleFileVisitor<Path>() {
private Path buildPath(Path path) {
return destination.resolve(path);
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
// We are now in dir. We need to remove root of config files to have a relative path
// If we are not walking in root dir, we might be able to copy its content
// if it does not already exist
if (configPathRootLevel != dir.getNameCount()) {
Path subpath = dir.subpath(configPathRootLevel, dir.getNameCount());
Path path = buildPath(subpath);
if (!Files.exists(path)) {
// We just move the structure to new dir
// we can't do atomic move here since src / dest might be on different mounts?
move(dir, path);
// We just ignore sub files from here
return FileVisitResult.SKIP_SUBTREE;
}
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Path subpath = null;
if (configPathRootLevel != file.getNameCount()) {
subpath = file.subpath(configPathRootLevel, file.getNameCount());
}
Path path = buildPath(subpath);
if (!Files.exists(path)) {
// We just move the new file to new dir
move(file, path);
} else if (suffix != null) {
if (!isSameFile(file, path)) {
// If it already exists we try to copy this new version appending suffix to its name
path = path.resolveSibling(path.getFileName().toString().concat(suffix));
// We just move the file to new dir but with a new name (appended with suffix)
Files.move(file, path, StandardCopyOption.REPLACE_EXISTING);
}
}
return FileVisitResult.CONTINUE;
}
/**
* Compares the content of two paths by comparing them
*/
private boolean isSameFile(Path first, Path second) throws IOException {
// do quick file size comparison before hashing
boolean sameFileSize = Files.size(first) == Files.size(second);
if (!sameFileSize) {
return false;
}
byte[] firstBytes = Files.readAllBytes(first);
byte[] secondBytes = Files.readAllBytes(second);
return Arrays.equals(firstBytes, secondBytes);
}
});
}
/**
* Copy recursively a dir to a new location
* @param source source dir
* @param destination destination dir
*/
public static void copyDirectoryRecursively(Path source, Path destination) throws IOException {
Files.walkFileTree(source, new TreeCopier(source, destination, false));
}
/**
* Move or rename a file to a target file. This method supports moving a file from
* different filesystems (not supported by Files.move()).
*
* @param source source file
* @param destination destination file
*/
public static void move(Path source, Path destination) throws IOException {
try {
// We can't use atomic move here since source & target can be on different filesystems.
Files.move(source, destination);
} catch (DirectoryNotEmptyException e) {
Files.walkFileTree(source, new TreeCopier(source, destination, true));
}
}
// TODO: note that this will fail if source and target are on different NIO.2 filesystems.
static class TreeCopier extends SimpleFileVisitor<Path> {
private final Path source;
private final Path target;
private final boolean delete;
TreeCopier(Path source, Path target, boolean delete) {
this.source = source;
this.target = target;
this.delete = delete;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
Path newDir = target.resolve(source.relativize(dir));
try {
Files.copy(dir, newDir);
} catch (FileAlreadyExistsException x) {
// We ignore this
} catch (IOException x) {
return SKIP_SUBTREE;
}
return CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
if (delete) {
IOUtils.rm(dir);
}
return CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Path newFile = target.resolve(source.relativize(file));
try {
Files.copy(file, newFile);
if (delete) {
Files.deleteIfExists(file);
}
} catch (IOException x) {
// We ignore this
}
return CONTINUE;
}
}
    /**
     * Returns an array of all files in the given directory matching.
     */

View File

@@ -25,7 +25,7 @@ import org.apache.log4j.spi.LoggingEvent;
 import org.elasticsearch.common.cli.Terminal;

 /**
- * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginManagerCliParser.
+ * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginCli.
  * */
 public class TerminalAppender extends AppenderSkeleton {
     @Override

View File

@@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
 import org.elasticsearch.action.support.AutoCreateIndex;
 import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClientNodesService;
 import org.elasticsearch.cluster.ClusterModule;
@@ -56,7 +57,7 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.gateway.PrimaryShardAllocator;
-import org.elasticsearch.http.netty.NettyHttpServerTransport;
+import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.store.IndexStoreConfig;
 import org.elasticsearch.indices.analysis.HunspellService;
@@ -158,7 +159,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
     ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
     EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
     EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
-    ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
     FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
     FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
     FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
@@ -204,11 +204,25 @@ public final class ClusterSettings extends AbstractScopedSettings {
     GatewayService.RECOVER_AFTER_NODES_SETTING,
     GatewayService.RECOVER_AFTER_TIME_SETTING,
     NetworkModule.HTTP_ENABLED,
-    NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS,
-    NettyHttpServerTransport.SETTING_CORS_ENABLED,
-    NettyHttpServerTransport.SETTING_CORS_MAX_AGE,
-    NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
-    NettyHttpServerTransport.SETTING_PIPELINING,
+    HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
+    HttpTransportSettings.SETTING_CORS_ENABLED,
+    HttpTransportSettings.SETTING_CORS_MAX_AGE,
+    HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
+    HttpTransportSettings.SETTING_PIPELINING,
+    HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
+    HttpTransportSettings.SETTING_HTTP_PORT,
+    HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
+    HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
+    HttpTransportSettings.SETTING_HTTP_COMPRESSION,
+    HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
+    HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
+    HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
+    HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
+    HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
+    HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
+    HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
+    HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
+    HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
     HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
     HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
     HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
@@ -346,6 +360,12 @@ public final class ClusterSettings extends AbstractScopedSettings {
     FsService.REFRESH_INTERVAL_SETTING,
     JvmGcMonitorService.ENABLED_SETTING,
     JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
-    JvmGcMonitorService.GC_SETTING
+    JvmGcMonitorService.GC_SETTING,
+    PageCacheRecycler.LIMIT_HEAP_SETTING,
+    PageCacheRecycler.WEIGHT_BYTES_SETTING,
+    PageCacheRecycler.WEIGHT_INT_SETTING,
+    PageCacheRecycler.WEIGHT_LONG_SETTING,
+    PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
+    PageCacheRecycler.TYPE_SETTING
     )));
 }
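Settings must be declared as Setting constants and listed in this registry to be accepted. A hedged sketch following the factory pattern visible elsewhere in this commit (the name and key below are hypothetical; the arguments are key, default, dynamic flag, and scope):

    public static final Setting<Boolean> EXAMPLE_FLAG_SETTING =
        Setting.boolSetting("example.flag", false, true, Setting.Scope.CLUSTER);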

View File

@@ -35,6 +35,10 @@ public class PortsRange {
         this.portRange = portRange;
     }

+    public String getPortRangeString() {
+        return portRange;
+    }
+
     public int[] ports() throws NumberFormatException {
         final IntArrayList ports = new IntArrayList();
         iterate(new PortCallback() {
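A small usage sketch for the new accessor (values hypothetical): the raw range string stays available, while ports() still expands it eagerly:

    PortsRange range = new PortsRange("9300-9305");
    String raw = range.getPortRangeString(); // "9300-9305"
    int[] candidates = range.ports();        // may throw NumberFormatException on malformed input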

View File

@@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.lease.Releasable;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Container that represents a resource with reference counting capabilities. Provides operations to suspend acquisition of new references.
* This is useful for resource management when resources are intermittently unavailable.
*
* Assumes less than Integer.MAX_VALUE references are concurrently being held at one point in time.
*/
public final class SuspendableRefContainer {
private static final int TOTAL_PERMITS = Integer.MAX_VALUE;
private final Semaphore semaphore;
public SuspendableRefContainer() {
// fair semaphore to ensure that blockAcquisition() does not starve under thread contention
this.semaphore = new Semaphore(TOTAL_PERMITS, true);
}
/**
* Tries acquiring a reference. Returns reference holder if reference acquisition is not blocked at the time of invocation (see
* {@link #blockAcquisition()}). Returns null if reference acquisition is blocked at the time of invocation.
*
* @return reference holder if reference acquisition is not blocked, null otherwise
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable tryAcquire() throws InterruptedException {
if (semaphore.tryAcquire(1, 0, TimeUnit.SECONDS)) { // the untimed tryAcquire methods do not honor the fairness setting
return idempotentRelease(1);
} else {
return null;
}
}
/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable acquire() throws InterruptedException {
semaphore.acquire();
return idempotentRelease(1);
}
/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
*/
public Releasable acquireUninterruptibly() {
semaphore.acquireUninterruptibly();
return idempotentRelease(1);
}
/**
* Disables reference acquisition and waits until all existing references are released.
* When released, reference acquisition is enabled again.
* This guarantees that between successful acquisition and release, no one is holding a reference.
*
* @return references holder to all references
*/
public Releasable blockAcquisition() {
semaphore.acquireUninterruptibly(TOTAL_PERMITS);
return idempotentRelease(TOTAL_PERMITS);
}
/**
* Helper method that ensures permits are only released once
*
* @return reference holder
*/
private Releasable idempotentRelease(int permits) {
AtomicBoolean closed = new AtomicBoolean();
return () -> {
if (closed.compareAndSet(false, true)) {
semaphore.release(permits);
}
};
}
/**
* Returns the number of references currently being held.
*/
public int activeRefs() {
int availablePermits = semaphore.availablePermits();
if (availablePermits == 0) {
// when blockAcquisition is holding all permits
return 0;
} else {
return TOTAL_PERMITS - availablePermits;
}
}
}
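A usage sketch for the new container (the surrounding code is hypothetical; Releasable is auto-closeable, so try-with-resources works): operations hold a reference for their duration, while a quiescing step blocks new acquisitions and drains in-flight ones:

    SuspendableRefContainer refs = new SuspendableRefContainer();

    // per operation:
    try (Releasable ref = refs.acquireUninterruptibly()) {
        // the operation runs while the reference is held
    }

    // to quiesce the resource:
    try (Releasable block = refs.blockAcquisition()) {
        // no other references are held or can be acquired here; activeRefs() == 0
    }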

View File

@ -89,7 +89,6 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
*/ */
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider { public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {
public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER); public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout", public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
@ -142,8 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private final AtomicBoolean initialStateSent = new AtomicBoolean(); private final AtomicBoolean initialStateSent = new AtomicBoolean();
private volatile boolean rejoinOnMasterGone;
/** counts the times this node has joined the cluster or has elected itself as master */ /** counts the times this node has joined the cluster or has elected itself as master */
private final AtomicLong clusterJoinsCounter = new AtomicLong(); private final AtomicLong clusterJoinsCounter = new AtomicLong();
@ -177,7 +174,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings); this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings); this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings); this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings);
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
@ -188,7 +184,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]"); throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]");
} }
}); });
clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone);
this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService); this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService);
this.masterFD.addListener(new MasterNodeFailureListener()); this.masterFD.addListener(new MasterNodeFailureListener());
@ -323,10 +318,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return clusterJoinsCounter.get() > 0; return clusterJoinsCounter.get() > 0;
} }
private void setRejoingOnMasterGone(boolean rejoin) {
this.rejoinOnMasterGone = rejoin;
}
/** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@ -670,37 +661,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// flush any pending cluster states from old master, so it will not be set as master again // flush any pending cluster states from old master, so it will not be set as master again
publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason)); publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason));
if (rejoinOnMasterGone) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")"); return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")");
} }
if (!electMaster.hasEnoughMasterNodes(discoveryNodes)) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "not enough master nodes after master left (reason = " + reason + ")");
}
final DiscoveryNode electedMaster = electMaster.electMaster(discoveryNodes); // elect master
final DiscoveryNode localNode = currentState.nodes().localNode();
if (localNode.equals(electedMaster)) {
masterFD.stop("got elected as new master since master left (reason = " + reason + ")");
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(localNode.id()).build();
ClusterState newState = ClusterState.builder(currentState).nodes(discoveryNodes).build();
nodesFD.updateNodesAndPing(newState);
return newState;
} else {
nodesFD.stop();
if (electedMaster != null) {
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(electedMaster.id()).build();
masterFD.restart(electedMaster, "possible elected master since master left (reason = " + reason + ")");
return ClusterState.builder(currentState)
.nodes(discoveryNodes)
.build();
} else {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master_left and no other node elected to become master");
}
}
}
@Override @Override
public void onFailure(String source, Throwable t) { public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source); logger.error("unexpected failure during [{}]", t, source);
@ -1109,10 +1072,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
} }
} }
boolean isRejoinOnMasterGone() {
return rejoinOnMasterGone;
}
public static class RejoinClusterRequest extends TransportRequest { public static class RejoinClusterRequest extends TransportRequest {
private String fromNodeId; private String fromNodeId;
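The net effect of the hunks above: when the master leaves, ZenDiscovery no longer elects a replacement locally (the removed branch that restarted fault detection against an elected node) but always rejoins the cluster. A condensed, hypothetical sketch of the resulting handling (the real code runs inside a cluster state update task):

// hypothetical condensation of the new master-left path: clear pending states
// from the old master, then rejoin, re-running the full ping/election cycle
ClusterState onMasterGone(ClusterState currentState, DiscoveryNodes discoveryNodes, String reason) {
    publishClusterState.pendingStatesQueue().failAllStatesAndClear(
            new ElasticsearchException("master left [{}]", reason));
    return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(),
            "master left (reason = " + reason + ")");
}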

View File

@ -330,31 +330,4 @@ public class Environment {
public static FileStore getFileStore(Path path) throws IOException { public static FileStore getFileStore(Path path) throws IOException {
return ESFileStore.getMatchingFileStore(path, fileStores); return ESFileStore.getMatchingFileStore(path, fileStores);
} }
/**
* Returns true if the path is writable.
* Acts just like {@link Files#isWritable(Path)}, except won't
* falsely return false for paths on SUBST'd drive letters
* See https://bugs.openjdk.java.net/browse/JDK-8034057
* Note this will set the file modification time (to its already-set value)
* to test access.
*/
@SuppressForbidden(reason = "works around https://bugs.openjdk.java.net/browse/JDK-8034057")
public static boolean isWritable(Path path) throws IOException {
boolean v = Files.isWritable(path);
if (v || Constants.WINDOWS == false) {
return v;
}
// isWritable returned false on windows, the hack begins!!!!!!
// resetting the modification time is the least destructive/simplest
// way to check for both files and directories, and fails early just
// in getting the current value if file doesn't exist, etc
try {
Files.setLastModifiedTime(path, Files.getLastModifiedTime(path));
return true;
} catch (Throwable e) {
return false;
}
}
} }

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.transport.PortsRange;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
public final class HttpTransportSettings {
public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN = new Setting<String>("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_METHODS = new Setting<String>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_HEADERS = new Setting<String>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER);
public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
// don't reset cookies by default, since I don't think we really need to
// note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER);
private HttpTransportSettings() {
}
}
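Call sites read these typed settings through Setting#get instead of raw string lookups with inline defaults; a minimal sketch, assuming the Settings builder API used elsewhere in this codebase (the values are made up for illustration):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.PortsRange;

Settings settings = Settings.builder()
        .put("http.cors.enabled", true)
        .put("http.port", "9201-9210")
        .build();
// each Setting carries its own parser and default, so callers no longer repeat
// fallback values such as "9200-9300" or 10000 at every lookup
boolean corsEnabled = HttpTransportSettings.SETTING_CORS_ENABLED.get(settings);    // true
PortsRange ports = HttpTransportSettings.SETTING_HTTP_PORT.get(settings);          // 9201-9210
int maxEvents = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings); // 10000 (default)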

View File

@ -20,6 +20,7 @@
package org.elasticsearch.http.netty; package org.elasticsearch.http.netty;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent; import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
import org.elasticsearch.rest.support.RestUtils; import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelHandler; import org.jboss.netty.channel.ChannelHandler;
@ -46,7 +47,8 @@ public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
public HttpRequestHandler(NettyHttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) { public HttpRequestHandler(NettyHttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) {
this.serverTransport = serverTransport; this.serverTransport = serverTransport;
this.corsPattern = RestUtils.checkCorsSettingForRegex(serverTransport.settings().get(NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN)); this.corsPattern = RestUtils
.checkCorsSettingForRegex(HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN.get(serverTransport.settings()));
this.httpPipeliningEnabled = serverTransport.pipelining; this.httpPipeliningEnabled = serverTransport.pipelining;
this.detailedErrorsEnabled = detailedErrorsEnabled; this.detailedErrorsEnabled = detailedErrorsEnabled;
this.threadContext = threadContext; this.threadContext = threadContext;

View File

@ -49,12 +49,12 @@ import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_HEADERS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_METHODS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ENABLED; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_MAX_AGE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS; import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS; import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS; import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
@ -117,7 +117,7 @@ public class NettyHttpChannel extends HttpChannel {
String originHeader = request.header(ORIGIN); String originHeader = request.header(ORIGIN);
if (!Strings.isNullOrEmpty(originHeader)) { if (!Strings.isNullOrEmpty(originHeader)) {
if (corsPattern == null) { if (corsPattern == null) {
String allowedOrigins = transport.settings().get(SETTING_CORS_ALLOW_ORIGIN, null); String allowedOrigins = SETTING_CORS_ALLOW_ORIGIN.get(transport.settings());
if (!Strings.isNullOrEmpty(allowedOrigins)) { if (!Strings.isNullOrEmpty(allowedOrigins)) {
resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins); resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins);
} }
@ -128,8 +128,8 @@ public class NettyHttpChannel extends HttpChannel {
if (nettyRequest.getMethod() == HttpMethod.OPTIONS) { if (nettyRequest.getMethod() == HttpMethod.OPTIONS) {
// Allow Ajax requests based on the CORS "preflight" request // Allow Ajax requests based on the CORS "preflight" request
resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings())); resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, transport.settings().get(SETTING_CORS_ALLOW_METHODS, "OPTIONS, HEAD, GET, POST, PUT, DELETE")); resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, SETTING_CORS_ALLOW_METHODS.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, transport.settings().get(SETTING_CORS_ALLOW_HEADERS, "X-Requested-With, Content-Type, Content-Length")); resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, SETTING_CORS_ALLOW_HEADERS.get(transport.settings()));
} }
if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) { if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) {

View File

@ -26,8 +26,6 @@ import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler; import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress;
@ -46,6 +44,7 @@ import org.elasticsearch.http.HttpRequest;
import org.elasticsearch.http.HttpServerAdapter; import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats; import org.elasticsearch.http.HttpStats;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler; import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler;
import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -75,7 +74,6 @@ import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING; import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE; import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY; import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
@ -93,22 +91,6 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
NettyUtils.setup(); NettyUtils.setup();
} }
public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_ORIGIN = "http.cors.allow-origin";
public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_METHODS = "http.cors.allow-methods";
public static final String SETTING_CORS_ALLOW_HEADERS = "http.cors.allow-headers";
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
public static final String SETTING_PIPELINING_MAX_EVENTS = "http.pipelining.max_events";
public static final String SETTING_HTTP_COMPRESSION = "http.compression";
public static final String SETTING_HTTP_COMPRESSION_LEVEL = "http.compression_level";
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
public static final int DEFAULT_SETTING_PIPELINING_MAX_EVENTS = 10000;
public static final String DEFAULT_PORT_RANGE = "9200-9300";
protected final NetworkService networkService; protected final NetworkService networkService;
protected final BigArrays bigArrays; protected final BigArrays bigArrays;
@ -131,7 +113,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
protected final boolean resetCookies; protected final boolean resetCookies;
protected final String port; protected final PortsRange port;
protected final String bindHosts[]; protected final String bindHosts[];
@ -176,28 +158,25 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
if (settings.getAsBoolean("netty.epollBugWorkaround", false)) { if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
System.setProperty("org.jboss.netty.epollBugWorkaround", "true"); System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
} }
ByteSizeValue maxContentLength = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
ByteSizeValue maxContentLength = settings.getAsBytesSize("http.netty.max_content_length", settings.getAsBytesSize("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB))); this.maxChunkSize = HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
this.maxChunkSize = settings.getAsBytesSize("http.netty.max_chunk_size", settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB))); this.maxHeaderSize = HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
this.maxHeaderSize = settings.getAsBytesSize("http.netty.max_header_size", settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB))); this.maxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
this.maxInitialLineLength = settings.getAsBytesSize("http.netty.max_initial_line_length", settings.getAsBytesSize("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB))); this.resetCookies = HttpTransportSettings.SETTING_HTTP_RESET_COOKIES.get(settings);
// don't reset cookies by default, since I don't think we really need to
// note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
this.resetCookies = settings.getAsBoolean("http.netty.reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null); this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1); this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2); this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings)); this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
this.port = settings.get("http.netty.port", settings.get("http.port", DEFAULT_PORT_RANGE)); this.port = HttpTransportSettings.SETTING_HTTP_PORT.get(settings);
this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null))); this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null))); this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
this.publishPort = settings.getAsInt("http.netty.publish_port", settings.getAsInt("http.publish_port", 0)); this.publishPort = HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.get(settings);
this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings)); this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings)); this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings)); this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings)); this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings)); this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings); this.detailedErrorsEnabled = HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
long defaultReceiverPredictor = 512 * 1024; long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@ -215,10 +194,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes()); receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
} }
this.compression = settings.getAsBoolean(SETTING_HTTP_COMPRESSION, false); this.compression = HttpTransportSettings.SETTING_HTTP_COMPRESSION.get(settings);
this.compressionLevel = settings.getAsInt(SETTING_HTTP_COMPRESSION_LEVEL, 6); this.compressionLevel = HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
this.pipelining = SETTING_PIPELINING.get(settings); this.pipelining = HttpTransportSettings.SETTING_PIPELINING.get(settings);
this.pipeliningMaxEvents = settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS); this.pipeliningMaxEvents = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings);
// validate max content length // validate max content length
if (maxContentLength.bytes() > Integer.MAX_VALUE) { if (maxContentLength.bytes() > Integer.MAX_VALUE) {
@ -312,10 +291,9 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
} }
private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) { private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<>(); final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>(); final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
boolean success = portsRange.iterate(new PortsRange.PortCallback() { boolean success = port.iterate(new PortsRange.PortCallback() {
@Override @Override
public boolean onPortNumber(int portNumber) { public boolean onPortNumber(int portNumber) {
try { try {
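With the port range parsed up front as a PortsRange, the bind loop iterates it directly instead of re-parsing a string; a small sketch of the callback pattern (tryBind is a hypothetical stand-in for the actual bind attempt):

import org.elasticsearch.common.transport.PortsRange;

PortsRange ports = new PortsRange("9200-9300");
boolean success = ports.iterate(new PortsRange.PortCallback() {
    @Override
    public boolean onPortNumber(int portNumber) {
        // returning true stops the iteration at the first port that binds;
        // returning false advances to the next port in the range
        return tryBind(portNumber);
    }
});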

View File

@ -42,17 +42,17 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.SuspendableRefContainer;
import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -189,9 +189,17 @@ public class IndexShard extends AbstractIndexShardComponent {
private final ShardPath path; private final ShardPath path;
private final IndexShardOperationCounter indexShardOperationCounter; private final SuspendableRefContainer suspendableRefContainer;
private final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); private static final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
// for primaries, we only allow writes when actually started (so the cluster has decided we started)
// in case of a primary relocation, we also allow writes after phase 2 completed, where the shard may be
// in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target
// which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries).
public static final EnumSet<IndexShardState> writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
// replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure it's consistent
// a relocated shard can also be the target of a replication if the relocation target has not been marked as active yet and is syncing its changes back to the relocation source
private static final EnumSet<IndexShardState> writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);
private final IndexSearcherWrapper searcherWrapper; private final IndexSearcherWrapper searcherWrapper;
@ -250,7 +258,7 @@ public class IndexShard extends AbstractIndexShardComponent {
} }
this.engineConfig = newEngineConfig(translogConfig, cachingPolicy); this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId); this.suspendableRefContainer = new SuspendableRefContainer();
this.provider = provider; this.provider = provider;
this.searcherWrapper = indexSearcherWrapper; this.searcherWrapper = indexSearcherWrapper;
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext()); this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext());
@ -321,6 +329,8 @@ public class IndexShard extends AbstractIndexShardComponent {
* Updates the shards routing entry. This mutate the shards internal state depending * Updates the shards routing entry. This mutate the shards internal state depending
* on the changes that get introduced by the new routing value. This method will persist shard level metadata * on the changes that get introduced by the new routing value. This method will persist shard level metadata
* unless explicitly disabled. * unless explicitly disabled.
*
* @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted
*/ */
public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) { public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) {
final ShardRouting currentRouting = this.shardRouting; final ShardRouting currentRouting = this.shardRouting;
@ -368,6 +378,14 @@ public class IndexShard extends AbstractIndexShardComponent {
} }
} }
} }
if (state == IndexShardState.RELOCATED &&
(newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
// if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
// failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
// active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
}
this.shardRouting = newRouting; this.shardRouting = newRouting;
indexEventListener.shardRoutingChanged(this, currentRouting, newRouting); indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
} finally { } finally {
@ -404,12 +422,16 @@ public class IndexShard extends AbstractIndexShardComponent {
} }
public IndexShard relocated(String reason) throws IndexShardNotStartedException { public IndexShard relocated(String reason) throws IndexShardNotStartedException {
try (Releasable block = suspendableRefContainer.blockAcquisition()) {
// no shard operation locks are being held here, move state from started to relocated
synchronized (mutex) { synchronized (mutex) {
if (state != IndexShardState.STARTED) { if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state); throw new IndexShardNotStartedException(shardId, state);
} }
changeState(IndexShardState.RELOCATED, reason); changeState(IndexShardState.RELOCATED, reason);
} }
}
return this; return this;
} }
@ -796,7 +818,6 @@ public class IndexShard extends AbstractIndexShardComponent {
refreshScheduledFuture = null; refreshScheduledFuture = null;
} }
changeState(IndexShardState.CLOSED, reason); changeState(IndexShardState.CLOSED, reason);
indexShardOperationCounter.decRef();
} finally { } finally {
final Engine engine = this.currentEngineReference.getAndSet(null); final Engine engine = this.currentEngineReference.getAndSet(null);
try { try {
@ -810,7 +831,6 @@ public class IndexShard extends AbstractIndexShardComponent {
} }
} }
public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) { if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
refresh("percolator_load_queries"); refresh("percolator_load_queries");
@ -967,16 +987,17 @@ public class IndexShard extends AbstractIndexShardComponent {
IndexShardState state = this.state; // one time volatile read IndexShardState state = this.state; // one time volatile read
if (origin == Engine.Operation.Origin.PRIMARY) { if (origin == Engine.Operation.Origin.PRIMARY) {
// for primaries, we only allow to write when actually started (so the cluster has decided we started) if (writeAllowedStatesForPrimary.contains(state) == false) {
// otherwise, we need to retry, we also want to still allow to index if we are relocated in case it fails throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]");
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) { }
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]"); } else if (origin == Engine.Operation.Origin.RECOVERY) {
if (state != IndexShardState.RECOVERING) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]");
} }
} else { } else {
// for replicas, we allow to write also while recovering, since we index also during recovery to replicas assert origin == Engine.Operation.Origin.REPLICA;
// and rely on version checks to make sure its consistent if (writeAllowedStatesForReplica.contains(state) == false) {
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]");
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
} }
} }
} }
@ -995,7 +1016,7 @@ public class IndexShard extends AbstractIndexShardComponent {
private void verifyNotClosed(Throwable suppressed) throws IllegalIndexShardStateException { private void verifyNotClosed(Throwable suppressed) throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read IndexShardState state = this.state; // one time volatile read
if (state == IndexShardState.CLOSED) { if (state == IndexShardState.CLOSED) {
final IllegalIndexShardStateException exc = new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed"); final IllegalIndexShardStateException exc = new IndexShardClosedException(shardId, "operation only allowed when not closed");
if (suppressed != null) { if (suppressed != null) {
exc.addSuppressed(suppressed); exc.addSuppressed(suppressed);
} }
@ -1390,37 +1411,21 @@ public class IndexShard extends AbstractIndexShardComponent {
idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME)); idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
} }
private static class IndexShardOperationCounter extends AbstractRefCounted { public Releasable acquirePrimaryOperationLock() {
final private ESLogger logger; verifyNotClosed();
private final ShardId shardId; if (shardRouting.primary() == false) {
throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
public IndexShardOperationCounter(ESLogger logger, ShardId shardId) { }
super("index-shard-operations-counter"); return suspendableRefContainer.acquireUninterruptibly();
this.logger = logger;
this.shardId = shardId;
} }
@Override public Releasable acquireReplicaOperationLock() {
protected void closeInternal() { verifyNotClosed();
logger.debug("operations counter reached 0, will not accept any further writes"); return suspendableRefContainer.acquireUninterruptibly();
} }
@Override public int getActiveOperationsCount() {
protected void alreadyClosed() { return suspendableRefContainer.activeRefs(); // refCount is incremented on creation and decremented on close
throw new IndexShardClosedException(shardId, "could not increment operation counter. shard is closed.");
}
}
public void incrementOperationCounter() {
indexShardOperationCounter.incRef();
}
public void decrementOperationCounter() {
indexShardOperationCounter.decRef();
}
public int getOperationsCount() {
return Math.max(0, indexShardOperationCounter.refCount() - 1); // refCount is incremented on creation and decremented on close
} }
/** /**
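The manual counter pairs removed above give way to scoped operation locks; a hedged sketch of the new calling convention (indexShard and the write itself are placeholders):

import org.elasticsearch.common.lease.Releasable;

// hypothetical caller: the lock is held for the duration of the operation and
// released by try-with-resources, so a blockAcquisition() during primary
// relocation handoff cannot overlap in-flight writes
try (Releasable ignored = indexShard.acquirePrimaryOperationLock()) {
    // perform the primary-side indexing operation here
}
// on a replica the analogous call is indexShard.acquireReplicaOperationLock()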

View File

@ -29,7 +29,11 @@ import java.io.IOException;
public class IndexShardRelocatedException extends IllegalIndexShardStateException { public class IndexShardRelocatedException extends IllegalIndexShardStateException {
public IndexShardRelocatedException(ShardId shardId) { public IndexShardRelocatedException(ShardId shardId) {
super(shardId, IndexShardState.RELOCATED, "Already relocated"); this(shardId, "Already relocated");
}
public IndexShardRelocatedException(ShardId shardId, String reason) {
super(shardId, IndexShardState.RELOCATED, reason);
} }
public IndexShardRelocatedException(StreamInput in) throws IOException{ public IndexShardRelocatedException(StreamInput in) throws IOException{

View File

@ -76,7 +76,7 @@ public class ShardId implements Streamable, Comparable<ShardId> {
if (this == o) return true; if (this == o) return true;
if (o == null) return false; if (o == null) return false;
ShardId shardId1 = (ShardId) o; ShardId shardId1 = (ShardId) o;
return shardId == shardId1.shardId && index.getName().equals(shardId1.index.getName()); return shardId == shardId1.shardId && index.equals(shardId1.index);
} }
@Override @Override
@ -112,7 +112,11 @@ public class ShardId implements Streamable, Comparable<ShardId> {
@Override @Override
public int compareTo(ShardId o) { public int compareTo(ShardId o) {
if (o.getId() == shardId) { if (o.getId() == shardId) {
return index.getName().compareTo(o.getIndex().getName()); int compare = index.getName().compareTo(o.getIndex().getName());
if (compare != 0) {
return compare;
}
return index.getUUID().compareTo(o.getIndex().getUUID());
} }
return Integer.compare(shardId, o.getId()); return Integer.compare(shardId, o.getId());
} }

View File

@ -39,13 +39,11 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
@ -93,26 +91,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {}; private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {};
// a map of mappings type we have seen per index due to cluster state
// we need this so we won't remove types automatically created as part of the indexing process
private final ConcurrentMap<Tuple<String, String>, Boolean> seenMappings = ConcurrentCollections.newConcurrentMap();
// a list of shards that failed during recovery // a list of shards that failed during recovery
// we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update // we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
private final ConcurrentMap<ShardId, FailedShard> failedShards = ConcurrentCollections.newConcurrentMap(); private final ConcurrentMap<ShardId, ShardRouting> failedShards = ConcurrentCollections.newConcurrentMap();
private final RestoreService restoreService; private final RestoreService restoreService;
private final RepositoriesService repositoriesService; private final RepositoriesService repositoriesService;
static class FailedShard {
public final long version;
public final long timestamp;
FailedShard(long version) {
this.version = version;
this.timestamp = System.currentTimeMillis();
}
}
private final Object mutex = new Object(); private final Object mutex = new Object();
private final FailedShardHandler failedShardHandler = new FailedShardHandler(); private final FailedShardHandler failedShardHandler = new FailedShardHandler();
@ -322,7 +306,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
try { try {
indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener); indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
} catch (Throwable e) { } catch (Throwable e) {
sendFailShard(shard, indexMetaData.getIndexUUID(), "failed to create index", e); sendFailShard(shard, "failed to create index", e);
} }
} }
} }
@ -387,7 +371,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// so this failure typically means wrong node level configuration or something similar // so this failure typically means wrong node level configuration or something similar
for (IndexShard indexShard : indexService) { for (IndexShard indexShard : indexService) {
ShardRouting shardRouting = indexShard.routingEntry(); ShardRouting shardRouting = indexShard.routingEntry();
failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, true, "failed to update mappings", t); failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t);
} }
} }
} }
@ -436,6 +420,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
failedShards.clear(); failedShards.clear();
return; return;
} }
DiscoveryNodes nodes = event.state().nodes(); DiscoveryNodes nodes = event.state().nodes();
for (final ShardRouting shardRouting : routingNode) { for (final ShardRouting shardRouting : routingNode) {
@ -455,12 +440,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (!indexService.hasShard(shardId) && shardRouting.started()) { if (!indexService.hasShard(shardId) && shardRouting.started()) {
if (failedShards.containsKey(shardRouting.shardId())) { if (failedShards.containsKey(shardRouting.shardId())) {
if (nodes.masterNode() != null) { if (nodes.masterNode() != null) {
shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(), String message = "master " + nodes.masterNode() + " marked shard as started, but shard has previously failed. resending shard failure";
"master " + nodes.masterNode() + " marked shard as started, but shard has previously failed. resending shard failure.", null, SHARD_STATE_ACTION_LISTENER); logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message);
shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER);
} }
} else { } else {
// the master thinks we are started, but we don't have this shard at all, mark it as failed // the master thinks we are started, but we don't have this shard at all, mark it as failed
sendFailShard(shardRouting, indexMetaData.getIndexUUID(), "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null); sendFailShard(shardRouting, "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
} }
continue; continue;
} }
@ -492,7 +478,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// shadow replicas do not support primary promotion. The master would reinitialize the shard, giving it a new allocation, meaning we should be there. // shadow replicas do not support primary promotion. The master would reinitialize the shard, giving it a new allocation, meaning we should be there.
assert (shardRouting.primary() && currentRoutingEntry.primary() == false) == false || indexShard.allowsPrimaryPromotion() : assert (shardRouting.primary() && currentRoutingEntry.primary() == false) == false || indexShard.allowsPrimaryPromotion() :
"shard for doesn't support primary promotion but master promoted it with changing allocation. New routing " + shardRouting + ", current routing " + currentRoutingEntry; "shard for doesn't support primary promotion but master promoted it with changing allocation. New routing " + shardRouting + ", current routing " + currentRoutingEntry;
try {
indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false); indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
} catch (Throwable e) {
failAndRemoveShard(shardRouting, indexService, true, "failed updating shard routing entry", e);
}
} }
} }
@ -503,40 +493,29 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} }
private void cleanFailedShards(final ClusterChangedEvent event) { private void cleanFailedShards(final ClusterChangedEvent event) {
RoutingTable routingTable = event.state().routingTable();
RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId()); RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) { if (routingNode == null) {
failedShards.clear(); failedShards.clear();
return; return;
} }
DiscoveryNodes nodes = event.state().nodes(); RoutingTable routingTable = event.state().routingTable();
long now = System.currentTimeMillis(); for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
String localNodeId = nodes.localNodeId(); Map.Entry<ShardId, ShardRouting> entry = iterator.next();
Iterator<Map.Entry<ShardId, FailedShard>> iterator = failedShards.entrySet().iterator(); ShardId failedShardId = entry.getKey();
shards: ShardRouting failedShardRouting = entry.getValue();
while (iterator.hasNext()) { IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
Map.Entry<ShardId, FailedShard> entry = iterator.next(); if (indexRoutingTable == null) {
FailedShard failedShard = entry.getValue();
IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex());
if (indexRoutingTable != null) {
IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id());
if (shardRoutingTable != null) {
for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
if (localNodeId.equals(shardRouting.currentNodeId())) {
// we have a timeout here just to make sure we don't have dangled failed shards for some reason
// its just another safely layer
if (shardRouting.version() == failedShard.version && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) {
// It's the same failed shard - keep it if it hasn't timed out
continue shards;
} else {
// Different version or expired, remove it
break;
}
}
}
}
}
iterator.remove(); iterator.remove();
continue;
}
IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
if (shardRoutingTable == null) {
iterator.remove();
continue;
}
if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
iterator.remove();
}
} }
} }
@ -561,7 +540,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
indexShard.shardId(), indexShard.state(), nodes.masterNode()); indexShard.shardId(), indexShard.state(), nodes.masterNode());
} }
if (nodes.masterNode() != null) { if (nodes.masterNode() != null) {
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), shardStateAction.shardStarted(shardRouting,
"master " + nodes.masterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started", "master " + nodes.masterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started",
SHARD_STATE_ACTION_LISTENER); SHARD_STATE_ACTION_LISTENER);
} }
@ -588,8 +567,9 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (!indexService.hasShard(shardId)) { if (!indexService.hasShard(shardId)) {
if (failedShards.containsKey(shardRouting.shardId())) { if (failedShards.containsKey(shardRouting.shardId())) {
if (nodes.masterNode() != null) { if (nodes.masterNode() != null) {
shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(), String message = "master " + nodes.masterNode() + " marked shard as initializing, but shard is marked as failed, resending shard failure";
"master " + nodes.masterNode() + " marked shard as initializing, but shard is marked as failed, resending shard failure", null, SHARD_STATE_ACTION_LISTENER); logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message);
shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER);
} }
return; return;
} }
@ -602,7 +582,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} catch (IndexShardAlreadyExistsException e) { } catch (IndexShardAlreadyExistsException e) {
// ignore this, the method call can happen several times // ignore this, the method call can happen several times
} catch (Throwable e) { } catch (Throwable e) {
failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, true, "failed to create shard", e); failAndRemoveShard(shardRouting, indexService, true, "failed to create shard", e);
return; return;
} }
} }
@ -626,7 +606,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// For primaries: requests in any case are routed to both when it's relocating and that way we handle // For primaries: requests in any case are routed to both when it's relocating and that way we handle
// the edge case where it's marked as relocated, and we might need to roll it back... // the edge case where it's marked as relocated, and we might need to roll it back...
// For replicas: we are recovering a backup from a primary // For replicas: we are recovering a backup from a primary
RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.RELOCATION : RecoveryState.Type.REPLICA; RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA;
RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(), type, sourceNode, nodes.localNode()); RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(), type, sourceNode, nodes.localNode());
indexShard.markAsRecovering("from " + sourceNode, recoveryState); indexShard.markAsRecovering("from " + sourceNode, recoveryState);
recoveryTarget.startRecovery(indexShard, type, sourceNode, new PeerRecoveryListener(shardRouting, indexService, indexMetaData)); recoveryTarget.startRecovery(indexShard, type, sourceNode, new PeerRecoveryListener(shardRouting, indexService, indexMetaData));
@ -644,7 +624,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
threadPool.generic().execute(() -> { threadPool.generic().execute(() -> {
try { try {
if (indexShard.recoverFromStore(nodes.localNode())) { if (indexShard.recoverFromStore(nodes.localNode())) {
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from store", SHARD_STATE_ACTION_LISTENER); shardStateAction.shardStarted(shardRouting, "after recovery from store", SHARD_STATE_ACTION_LISTENER);
} }
} catch (Throwable t) { } catch (Throwable t) {
handleRecoveryFailure(indexService, shardRouting, true, t); handleRecoveryFailure(indexService, shardRouting, true, t);
@ -662,7 +642,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository()); final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
if (indexShard.restoreFromRepository(indexShardRepository, nodes.localNode())) { if (indexShard.restoreFromRepository(indexShardRepository, nodes.localNode())) {
restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), sId); restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), sId);
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from repository", SHARD_STATE_ACTION_LISTENER); shardStateAction.shardStarted(shardRouting, "after recovery from repository", SHARD_STATE_ACTION_LISTENER);
} }
} catch (Throwable first) { } catch (Throwable first) {
try { try {
@ -732,7 +712,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
@Override @Override
public void onRecoveryDone(RecoveryState state) { public void onRecoveryDone(RecoveryState state) {
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery (replica) from node [" + state.getSourceNode() + "]", SHARD_STATE_ACTION_LISTENER); shardStateAction.shardStarted(shardRouting, "after recovery (replica) from node [" + state.getSourceNode() + "]", SHARD_STATE_ACTION_LISTENER);
} }
@Override @Override
@ -743,7 +723,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private void handleRecoveryFailure(IndexService indexService, ShardRouting shardRouting, boolean sendShardFailure, Throwable failure) { private void handleRecoveryFailure(IndexService indexService, ShardRouting shardRouting, boolean sendShardFailure, Throwable failure) {
synchronized (mutex) { synchronized (mutex) {
failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, sendShardFailure, "failed recovery", failure); failAndRemoveShard(shardRouting, indexService, sendShardFailure, "failed recovery", failure);
} }
} }
@ -764,7 +744,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} }
private void failAndRemoveShard(ShardRouting shardRouting, String indexUUID, @Nullable IndexService indexService, boolean sendShardFailure, String message, @Nullable Throwable failure) { private void failAndRemoveShard(ShardRouting shardRouting, @Nullable IndexService indexService, boolean sendShardFailure, String message, @Nullable Throwable failure) {
if (indexService != null && indexService.hasShard(shardRouting.getId())) { if (indexService != null && indexService.hasShard(shardRouting.getId())) {
// if the indexService is null we can't remove the shard, that's fine since we might have a failure // if the indexService is null we can't remove the shard, that's fine since we might have a failure
// when the index is removed and then we already removed the index service for that shard... // when the index is removed and then we already removed the index service for that shard...
@ -777,15 +757,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} }
} }
if (sendShardFailure) { if (sendShardFailure) {
sendFailShard(shardRouting, indexUUID, message, failure); sendFailShard(shardRouting, message, failure);
} }
} }
private void sendFailShard(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) { private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Throwable failure) {
try { try {
logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message); logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message);
failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version())); failedShards.put(shardRouting.shardId(), shardRouting);
shardStateAction.shardFailed(shardRouting, indexUUID, message, failure, SHARD_STATE_ACTION_LISTENER); shardStateAction.shardFailed(shardRouting, shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);
} catch (Throwable e1) { } catch (Throwable e1) {
logger.warn("[{}][{}] failed to mark shard as failed (because of [{}])", e1, shardRouting.getIndexName(), shardRouting.getId(), message); logger.warn("[{}][{}] failed to mark shard as failed (because of [{}])", e1, shardRouting.getIndexName(), shardRouting.getId(), message);
} }
@ -798,7 +778,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
final ShardRouting shardRouting = shardFailure.routing; final ShardRouting shardRouting = shardFailure.routing;
threadPool.generic().execute(() -> { threadPool.generic().execute(() -> {
synchronized (mutex) { synchronized (mutex) {
failAndRemoveShard(shardRouting, shardFailure.indexUUID, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause); failAndRemoveShard(shardRouting, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause);
} }
}); });
} }
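For reference, the shard-failure API change in one line: the old call carried the index UUID, the new one passes the routing entry as the source shard routing as well. A minimal before/after sketch, using only the signatures visible in this diff:

    // before (sketch): keyed by index UUID
    shardStateAction.shardFailed(shardRouting, indexUUID, message, failure, SHARD_STATE_ACTION_LISTENER);
    // after (sketch): second argument is the source shard routing; for a locally detected failure it is the same routing
    shardStateAction.shardFailed(shardRouting, shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);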

View File

@ -435,7 +435,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
if (indexShard.routingEntry().primary() == false) { if (indexShard.routingEntry().primary() == false) {
throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
} }
int opCount = indexShard.getOperationsCount(); int opCount = indexShard.getActiveOperationsCount();
logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount); logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
return new InFlightOpsResponse(opCount); return new InFlightOpsResponse(opCount);
} }

View File

@ -61,8 +61,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
private final ClusterService clusterService; private final ClusterService clusterService;
private final OngoingRecoveres ongoingRecoveries = new OngoingRecoveres(); private final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries();
@Inject @Inject
public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService, public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,
@ -111,7 +110,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]"); throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
} }
logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode(), request.markAsRelocated()); logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode());
final RecoverySourceHandler handler; final RecoverySourceHandler handler;
if (shard.indexSettings().isOnSharedFilesystem()) { if (shard.indexSettings().isOnSharedFilesystem()) {
handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger); handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger);
@ -134,8 +133,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
} }
} }
private static final class OngoingRecoveries {
private static final class OngoingRecoveres {
private final Map<IndexShard, Set<RecoverySourceHandler>> ongoingRecoveries = new HashMap<>(); private final Map<IndexShard, Set<RecoverySourceHandler>> ongoingRecoveries = new HashMap<>();
synchronized void add(IndexShard shard, RecoverySourceHandler handler) { synchronized void add(IndexShard shard, RecoverySourceHandler handler) {

View File

@ -393,9 +393,11 @@ public class RecoverySourceHandler {
} }
}); });
if (request.markAsRelocated()) { if (isPrimaryRelocation()) {
// TODO what happens if the recovery process fails afterwards, we need to mark this back to started /**
* if the recovery process fails after setting the shard state to RELOCATED, both relocation source and
* target are failed (see {@link IndexShard#updateRoutingEntry}).
*/
try { try {
shard.relocated("to " + request.targetNode()); shard.relocated("to " + request.targetNode());
} catch (IllegalIndexShardStateException e) { } catch (IllegalIndexShardStateException e) {
@ -409,6 +411,10 @@ public class RecoverySourceHandler {
indexName, shardId, request.targetNode(), stopWatch.totalTime()); indexName, shardId, request.targetNode(), stopWatch.totalTime());
} }
protected boolean isPrimaryRelocation() {
return request.recoveryType() == RecoveryState.Type.PRIMARY_RELOCATION;
}
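With the markAsRelocated flag gone, whether a recovery is a primary relocation is derived from the recovery type alone. A minimal sketch of the finalization branch, assuming the shard and request fields of RecoverySourceHandler shown in this diff:

    // sketch; shard and request are RecoverySourceHandler fields from this diff
    if (isPrimaryRelocation()) {
        // if this fails after the shard state moved to RELOCATED, both source and target are failed
        shard.relocated("to " + request.targetNode());
    }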
/** /**
* Send the given snapshot's operations to this handler's target node. * Send the given snapshot's operations to this handler's target node.
* <p> * <p>

View File

@ -101,7 +101,7 @@ public class RecoveryState implements ToXContent, Streamable {
STORE((byte) 0), STORE((byte) 0),
SNAPSHOT((byte) 1), SNAPSHOT((byte) 1),
REPLICA((byte) 2), REPLICA((byte) 2),
RELOCATION((byte) 3); PRIMARY_RELOCATION((byte) 3);
private static final Type[] TYPES = new Type[Type.values().length]; private static final Type[] TYPES = new Type[Type.values().length];

View File

@ -138,7 +138,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
// create a new recovery status, and process... // create a new recovery status, and process...
final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
threadPool.generic().execute(new RecoveryRunner(recoveryId)); threadPool.generic().execute(new RecoveryRunner(recoveryId));
} }
protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
@ -178,7 +177,7 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
return; return;
} }
final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(), final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(),
false, metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId()); metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());
final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>(); final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
try { try {
@ -267,7 +266,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false); onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false);
return; return;
} }
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true); onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true);
} }
} }

View File

@ -84,8 +84,4 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
return 0; return 0;
} }
private boolean isPrimaryRelocation() {
return request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary();
}
} }

View File

@ -41,8 +41,6 @@ public class StartRecoveryRequest extends TransportRequest {
private DiscoveryNode targetNode; private DiscoveryNode targetNode;
private boolean markAsRelocated;
private Store.MetadataSnapshot metadataSnapshot; private Store.MetadataSnapshot metadataSnapshot;
private RecoveryState.Type recoveryType; private RecoveryState.Type recoveryType;
@ -56,12 +54,11 @@ public class StartRecoveryRequest extends TransportRequest {
* @param sourceNode The node to recover from * @param sourceNode The node to recover from
* @param targetNode The node to recover to * @param targetNode The node to recover to
*/ */
public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, boolean markAsRelocated, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) { public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
this.recoveryId = recoveryId; this.recoveryId = recoveryId;
this.shardId = shardId; this.shardId = shardId;
this.sourceNode = sourceNode; this.sourceNode = sourceNode;
this.targetNode = targetNode; this.targetNode = targetNode;
this.markAsRelocated = markAsRelocated;
this.recoveryType = recoveryType; this.recoveryType = recoveryType;
this.metadataSnapshot = metadataSnapshot; this.metadataSnapshot = metadataSnapshot;
} }
@ -82,10 +79,6 @@ public class StartRecoveryRequest extends TransportRequest {
return targetNode; return targetNode;
} }
public boolean markAsRelocated() {
return markAsRelocated;
}
public RecoveryState.Type recoveryType() { public RecoveryState.Type recoveryType() {
return recoveryType; return recoveryType;
} }
@ -101,7 +94,6 @@ public class StartRecoveryRequest extends TransportRequest {
shardId = ShardId.readShardId(in); shardId = ShardId.readShardId(in);
sourceNode = DiscoveryNode.readNode(in); sourceNode = DiscoveryNode.readNode(in);
targetNode = DiscoveryNode.readNode(in); targetNode = DiscoveryNode.readNode(in);
markAsRelocated = in.readBoolean();
metadataSnapshot = new Store.MetadataSnapshot(in); metadataSnapshot = new Store.MetadataSnapshot(in);
recoveryType = RecoveryState.Type.fromId(in.readByte()); recoveryType = RecoveryState.Type.fromId(in.readByte());
@ -114,7 +106,6 @@ public class StartRecoveryRequest extends TransportRequest {
shardId.writeTo(out); shardId.writeTo(out);
sourceNode.writeTo(out); sourceNode.writeTo(out);
targetNode.writeTo(out); targetNode.writeTo(out);
out.writeBoolean(markAsRelocated);
metadataSnapshot.writeTo(out); metadataSnapshot.writeTo(out);
out.writeByte(recoveryType.id()); out.writeByte(recoveryType.id());
} }
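Constructing a StartRecoveryRequest accordingly takes one argument less; the recovery type now carries the relocation information. A hedged construction sketch (the constructor signature is from this diff, all argument values are illustrative placeholders):

    // sketch; argument values are placeholders
    StartRecoveryRequest request = new StartRecoveryRequest(
        shardId, sourceNode, targetNode,
        metadataSnapshot,
        RecoveryState.Type.PRIMARY_RELOCATION,  // was: markAsRelocated=true + Type.RELOCATION
        recoveryId);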

View File

@ -20,6 +20,7 @@
package org.elasticsearch.ingest; package org.elasticsearch.ingest;
import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.DeletePipelineRequest;
@ -36,10 +37,8 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.core.Pipeline; import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.TemplateService; import org.elasticsearch.ingest.core.TemplateService;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
import java.io.Closeable; import java.io.Closeable;
@ -104,8 +103,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) {
try { try {
pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactoryRegistry)); pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactoryRegistry));
} catch (ElasticsearchParseException e) {
throw e;
} catch (Exception e) { } catch (Exception e) {
throw new RuntimeException(e); throw new ElasticsearchParseException("Error updating pipeline with id [" + pipeline.getId() + "]", e);
} }
} }
this.pipelines = Collections.unmodifiableMap(pipelines); this.pipelines = Collections.unmodifiableMap(pipelines);
@ -154,9 +155,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) { public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) {
// validates the pipeline and processor configuration before submitting a cluster update task: // validates the pipeline and processor configuration before submitting a cluster update task:
Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2(); Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2();
WritePipelineResponse response = validatePipelineResponse(request.getId(), pipelineConfig); try {
if (response != null) { factory.create(request.getId(), pipelineConfig, processorFactoryRegistry);
listener.onResponse(response); } catch(Exception e) {
listener.onFailure(e);
return; return;
} }
clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), new AckedClusterStateUpdateTask<WritePipelineResponse>(request, listener) { clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), new AckedClusterStateUpdateTask<WritePipelineResponse>(request, listener) {
@ -234,16 +236,4 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
} }
return result; return result;
} }
WritePipelineResponse validatePipelineResponse(String id, Map<String, Object> config) {
try {
factory.create(id, config, processorFactoryRegistry);
return null;
} catch (ConfigurationPropertyException e) {
return new WritePipelineResponse(new PipelineFactoryError(e));
} catch (Exception e) {
return new WritePipelineResponse(new PipelineFactoryError(e.getMessage()));
}
}
} }
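put() now validates by running the pipeline factory directly and failing the listener with whatever exception comes back, rather than mapping errors into a PipelineFactoryError payload. A caller-side sketch of what that means, assuming getHeader(...) is the read counterpart of the addHeader(...) calls used in this diff:

    // sketch; getHeader(...) assumed as the read side of addHeader(...)
    try {
        factory.create(id, pipelineConfig, processorFactoryRegistry);
    } catch (ElasticsearchParseException e) {
        List<String> type = e.getHeader("processor_type");  // set by newConfigurationException
        List<String> tag = e.getHeader("processor_tag");
        // surface e via ActionListener.onFailure(e), as put() now does
    }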

View File

@ -32,7 +32,8 @@ import java.util.Objects;
*/ */
public class CompoundProcessor implements Processor { public class CompoundProcessor implements Processor {
static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message"; static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message";
static final String ON_FAILURE_PROCESSOR_FIELD = "on_failure_processor"; static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type";
static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag";
private final List<Processor> processors; private final List<Processor> processors;
private final List<Processor> onFailureProcessors; private final List<Processor> onFailureProcessors;
@ -74,24 +75,26 @@ public class CompoundProcessor implements Processor {
if (onFailureProcessors.isEmpty()) { if (onFailureProcessors.isEmpty()) {
throw e; throw e;
} else { } else {
executeOnFailure(ingestDocument, e, processor.getType()); executeOnFailure(ingestDocument, e, processor.getType(), processor.getTag());
} }
break; break;
} }
} }
} }
void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType) throws Exception { void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) throws Exception {
Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata(); Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();
try { try {
ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage()); ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage());
ingestMetadata.put(ON_FAILURE_PROCESSOR_FIELD, failedProcessorType); ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType);
ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag);
for (Processor processor : onFailureProcessors) { for (Processor processor : onFailureProcessors) {
processor.execute(ingestDocument); processor.execute(ingestDocument);
} }
} finally { } finally {
ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD); ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD);
ingestMetadata.remove(ON_FAILURE_PROCESSOR_FIELD); ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD);
ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD);
} }
} }
} }
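With this rename, on_failure processors see three ingest-metadata fields for the duration of the failure handler — on_failure_message, on_failure_processor_type and on_failure_processor_tag — all removed again in the finally block. A hypothetical on_failure processor reading them (the setFieldValue call is an assumed IngestDocument accessor; the class is illustrative):

    // hypothetical processor body; field names taken from the constants in this diff
    public void execute(IngestDocument ingestDocument) {
        Map<String, String> meta = ingestDocument.getIngestMetadata();
        String why = meta.get("on_failure_message");
        String type = meta.get("on_failure_processor_type");
        String tag = meta.get("on_failure_processor_tag");
        ingestDocument.setFieldValue("error", why + " (" + type + "/" + tag + ")");  // assumed accessor
    }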

View File

@ -19,7 +19,8 @@
package org.elasticsearch.ingest.core; package org.elasticsearch.ingest.core;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -32,7 +33,7 @@ public final class ConfigurationUtils {
/** /**
* Returns and removes the specified optional property from the specified configuration map. * Returns and removes the specified optional property from the specified configuration map.
* *
* If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown. * If the property value isn't of type string a {@link ElasticsearchParseException} is thrown.
*/ */
public static String readOptionalStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { public static String readOptionalStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName); Object value = configuration.remove(propertyName);
@ -42,8 +43,8 @@ public final class ConfigurationUtils {
/** /**
* Returns and removes the specified property from the specified configuration map. * Returns and removes the specified property from the specified configuration map.
* *
* If the property value isn't of type string an {@link ConfigurationPropertyException} is thrown. * If the property value isn't of type string an {@link ElasticsearchParseException} is thrown.
* If the property is missing an {@link ConfigurationPropertyException} is thrown * If the property is missing an {@link ElasticsearchParseException} is thrown
*/ */
public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
return readStringProperty(processorType, processorTag, configuration, propertyName, null); return readStringProperty(processorType, processorTag, configuration, propertyName, null);
@ -52,15 +53,15 @@ public final class ConfigurationUtils {
/** /**
* Returns and removes the specified property from the specified configuration map. * Returns and removes the specified property from the specified configuration map.
* *
* If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown. * If the property value isn't of type string a {@link ElasticsearchParseException} is thrown.
* If the property is missing and no default value has been specified a {@link ConfigurationPropertyException} is thrown * If the property is missing and no default value has been specified a {@link ElasticsearchParseException} is thrown
*/ */
public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName, String defaultValue) { public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName, String defaultValue) {
Object value = configuration.remove(propertyName); Object value = configuration.remove(propertyName);
if (value == null && defaultValue != null) { if (value == null && defaultValue != null) {
return defaultValue; return defaultValue;
} else if (value == null) { } else if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
} }
return readString(processorType, processorTag, propertyName, value); return readString(processorType, processorTag, propertyName, value);
} }
@ -72,13 +73,13 @@ public final class ConfigurationUtils {
if (value instanceof String) { if (value instanceof String) {
return (String) value; return (String) value;
} }
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]"); throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]");
} }
/** /**
* Returns and removes the specified property of type list from the specified configuration map. * Returns and removes the specified property of type list from the specified configuration map.
* *
* If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown. * If the property value isn't of type list an {@link ElasticsearchParseException} is thrown.
*/ */
public static <T> List<T> readOptionalList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { public static <T> List<T> readOptionalList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName); Object value = configuration.remove(propertyName);
@ -91,13 +92,13 @@ public final class ConfigurationUtils {
/** /**
* Returns and removes the specified property of type list from the specified configuration map. * Returns and removes the specified property of type list from the specified configuration map.
* *
* If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown. * If the property value isn't of type list an {@link ElasticsearchParseException} is thrown.
* If the property is missing an {@link ConfigurationPropertyException} is thrown * If the property is missing an {@link ElasticsearchParseException} is thrown
*/ */
public static <T> List<T> readList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { public static <T> List<T> readList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName); Object value = configuration.remove(propertyName);
if (value == null) { if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
} }
return readList(processorType, processorTag, propertyName, value); return readList(processorType, processorTag, propertyName, value);
@ -109,20 +110,20 @@ public final class ConfigurationUtils {
List<T> stringList = (List<T>) value; List<T> stringList = (List<T>) value;
return stringList; return stringList;
} else { } else {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]"); throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]");
} }
} }
/** /**
* Returns and removes the specified property of type map from the specified configuration map. * Returns and removes the specified property of type map from the specified configuration map.
* *
* If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown. * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown.
* If the property is missing an {@link ConfigurationPropertyException} is thrown * If the property is missing an {@link ElasticsearchParseException} is thrown
*/ */
public static <T> Map<String, T> readMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { public static <T> Map<String, T> readMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName); Object value = configuration.remove(propertyName);
if (value == null) { if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
} }
return readMap(processorType, processorTag, propertyName, value); return readMap(processorType, processorTag, propertyName, value);
@ -131,7 +132,7 @@ public final class ConfigurationUtils {
/** /**
* Returns and removes the specified property of type map from the specified configuration map. * Returns and removes the specified property of type map from the specified configuration map.
* *
* If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown. * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown.
*/ */
public static <T> Map<String, T> readOptionalMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { public static <T> Map<String, T> readOptionalMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName); Object value = configuration.remove(propertyName);
@ -148,7 +149,7 @@ public final class ConfigurationUtils {
Map<String, T> map = (Map<String, T>) value; Map<String, T> map = (Map<String, T>) value;
return map; return map;
} else { } else {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]"); throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]");
} }
} }
@ -158,8 +159,23 @@ public final class ConfigurationUtils {
public static Object readObject(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { public static Object readObject(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName); Object value = configuration.remove(propertyName);
if (value == null) { if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
} }
return value; return value;
} }
public static ElasticsearchParseException newConfigurationException(String processorType, String processorTag, String propertyName, String reason) {
ElasticsearchParseException exception = new ElasticsearchParseException("[" + propertyName + "] " + reason);
if (processorType != null) {
exception.addHeader("processor_type", processorType);
}
if (processorTag != null) {
exception.addHeader("processor_tag", processorTag);
}
if (propertyName != null) {
exception.addHeader("property_name", propertyName);
}
return exception;
}
} }
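A processor factory built on these helpers gets uniform error reporting for free: missing or mistyped properties surface as ElasticsearchParseException with the processor type, tag and property name attached as headers. A minimal hypothetical factory sketch using only the helpers defined above (names MyProcessor and "my_processor" are illustrative):

    // hypothetical factory; helper signatures from this file
    public static final String TYPE = "my_processor";

    MyProcessor doCreate(String processorTag, Map<String, Object> config) {
        String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
        String sep = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "separator");
        if (sep != null && sep.length() != 1) {
            throw ConfigurationUtils.newConfigurationException(TYPE, processorTag, "separator", "must be a single character");
        }
        return new MyProcessor(processorTag, field, sep);
    }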

View File

@ -19,7 +19,7 @@
package org.elasticsearch.ingest.core; package org.elasticsearch.ingest.core;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException; import org.elasticsearch.ElasticsearchParseException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -27,6 +27,7 @@ import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
/** /**
* A pipeline is a list of {@link Processor} instances grouped under a unique id. * A pipeline is a list of {@link Processor} instances grouped under a unique id.
*/ */
@ -84,20 +85,20 @@ public final class Pipeline {
public final static class Factory { public final static class Factory {
public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException { public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws Exception {
String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
List<Processor> processors = readProcessorConfigs(processorConfigs, processorRegistry); List<Processor> processors = readProcessorConfigs(processorConfigs, processorRegistry);
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry); List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
if (config.isEmpty() == false) { if (config.isEmpty() == false) {
throw new ConfigurationPropertyException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); throw new ElasticsearchParseException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
} }
CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.unmodifiableList(processors), Collections.unmodifiableList(onFailureProcessors)); CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.unmodifiableList(processors), Collections.unmodifiableList(onFailureProcessors));
return new Pipeline(id, description, compoundProcessor); return new Pipeline(id, description, compoundProcessor);
} }
private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException { private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws Exception {
List<Processor> processors = new ArrayList<>(); List<Processor> processors = new ArrayList<>();
if (processorConfigs != null) { if (processorConfigs != null) {
for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) { for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {
@ -110,28 +111,22 @@ public final class Pipeline {
return processors; return processors;
} }
private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws ConfigurationPropertyException { private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws Exception {
Processor.Factory factory = processorRegistry.get(type); Processor.Factory factory = processorRegistry.get(type);
if (factory != null) { if (factory != null) {
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry); List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
Processor processor; Processor processor;
try {
processor = factory.create(config); processor = factory.create(config);
} catch (ConfigurationPropertyException e) {
throw e;
} catch (Exception e) {
throw new ConfigurationPropertyException(e.getMessage());
}
if (!config.isEmpty()) { if (!config.isEmpty()) {
throw new ConfigurationPropertyException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); throw new ElasticsearchParseException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
} }
if (onFailureProcessors.isEmpty()) { if (onFailureProcessors.isEmpty()) {
return processor; return processor;
} }
return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors); return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors);
} }
throw new ConfigurationPropertyException("No processor type exists with name [" + type + "]"); throw new ElasticsearchParseException("No processor type exists with name [" + type + "]");
} }
} }
} }
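For reference, the shape of the config map Factory.create parses: an optional description, a required processors list, and an optional on_failure list, each processor keyed by its type. A hedged sketch of building one programmatically, assuming the key constants resolve to "description", "processors" and "on_failure":

    // sketch; literal keys are assumed values of DESCRIPTION_KEY, PROCESSORS_KEY and ON_FAILURE_KEY
    Map<String, Object> config = new HashMap<>();
    config.put("description", "convert the bytes field");
    Map<String, Object> convert = new HashMap<>();
    convert.put("field", "bytes");
    convert.put("type", "integer");
    config.put("processors", Arrays.asList(Collections.singletonMap("convert", convert)));
    Pipeline pipeline = new Pipeline.Factory().create("my-pipeline", config, processorRegistry);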

View File

@ -1,96 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.core;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import java.io.IOException;
public class PipelineFactoryError implements Streamable, ToXContent {
private String reason;
private String processorType;
private String processorTag;
private String processorPropertyName;
public PipelineFactoryError() {
}
public PipelineFactoryError(ConfigurationPropertyException e) {
this.reason = e.getMessage();
this.processorType = e.getProcessorType();
this.processorTag = e.getProcessorTag();
this.processorPropertyName = e.getPropertyName();
}
public PipelineFactoryError(String reason) {
this.reason = "Constructing Pipeline failed: " + reason;
}
public String getReason() {
return reason;
}
public String getProcessorTag() {
return processorTag;
}
public String getProcessorPropertyName() {
return processorPropertyName;
}
public String getProcessorType() {
return processorType;
}
@Override
public void readFrom(StreamInput in) throws IOException {
reason = in.readString();
processorType = in.readOptionalString();
processorTag = in.readOptionalString();
processorPropertyName = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(reason);
out.writeOptionalString(processorType);
out.writeOptionalString(processorTag);
out.writeOptionalString(processorPropertyName);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("error");
builder.field("type", processorType);
builder.field("tag", processorTag);
builder.field("reason", reason);
builder.field("property_name", processorPropertyName);
builder.endObject();
return builder;
}
}

View File

@ -1,43 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.core;
public class PipelineFactoryResult {
private final Pipeline pipeline;
private final PipelineFactoryError error;
public PipelineFactoryResult(Pipeline pipeline) {
this.pipeline = pipeline;
this.error = null;
}
public PipelineFactoryResult(PipelineFactoryError error) {
this.error = error;
this.pipeline = null;
}
public Pipeline getPipeline() {
return pipeline;
}
public PipelineFactoryError getError() {
return error;
}
}

View File

@ -17,11 +17,8 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.ingest.core; package org.elasticsearch.ingest.core;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import java.util.Map; import java.util.Map;
/** /**

View File

@ -1,53 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
/**
* Exception class thrown by processor factories.
*/
public class ConfigurationPropertyException extends RuntimeException {
private String processorType;
private String processorTag;
private String propertyName;
public ConfigurationPropertyException(String processorType, String processorTag, String propertyName, String message) {
super("[" + propertyName + "] " + message);
this.processorTag = processorTag;
this.processorType = processorType;
this.propertyName = propertyName;
}
public ConfigurationPropertyException(String errorMessage) {
super(errorMessage);
}
public String getPropertyName() {
return propertyName;
}
public String getProcessorType() {
return processorType;
}
public String getProcessorTag() {
return processorTag;
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.ingest.processor; package org.elasticsearch.ingest.processor;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.core.AbstractProcessor; import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.ingest.core.IngestDocument;
@ -29,6 +30,8 @@ import java.util.List;
import java.util.Locale; import java.util.Locale;
import java.util.Map; import java.util.Map;
import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException;
/** /**
* Processor that converts fields content to a different type. Supported types are: integer, float, boolean and string. * Processor that converts fields content to a different type. Supported types are: integer, float, boolean and string.
* Throws exception if the field is not there or the conversion fails. * Throws exception if the field is not there or the conversion fails.
@ -80,11 +83,11 @@ public class ConvertProcessor extends AbstractProcessor {
public abstract Object convert(Object value); public abstract Object convert(Object value);
public static Type fromString(String type) { public static Type fromString(String processorTag, String propertyName, String type) {
try { try {
return Type.valueOf(type.toUpperCase(Locale.ROOT)); return Type.valueOf(type.toUpperCase(Locale.ROOT));
} catch(IllegalArgumentException e) { } catch(IllegalArgumentException e) {
throw new IllegalArgumentException("type [" + type + "] not supported, cannot convert field.", e); throw newConfigurationException(TYPE, processorTag, propertyName, "type [" + type + "] not supported, cannot convert field.");
} }
} }
} }
@ -138,7 +141,8 @@ public class ConvertProcessor extends AbstractProcessor {
@Override @Override
public ConvertProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception { public ConvertProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
Type convertType = Type.fromString(ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type")); String typeProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type");
Type convertType = Type.fromString(processorTag, "type", typeProperty);
return new ConvertProcessor(processorTag, field, convertType); return new ConvertProcessor(processorTag, field, convertType);
} }
} }
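The practical effect of the fromString change: an unsupported type now fails pipeline creation with a configuration-style parse exception carrying processor_tag and property_name headers, instead of a bare IllegalArgumentException. A sketch (the "datetime" value is deliberately unsupported):

    // sketch; signature from this diff
    ConvertProcessor.Type t = ConvertProcessor.Type.fromString("tag1", "type", "datetime");
    // throws ElasticsearchParseException: [type] type [datetime] not supported, cannot convert field.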

View File

@ -1,106 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import org.elasticsearch.ingest.core.IngestDocument;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
* Processor that replaces dots in document field names with a
* specified separator.
*/
public class DeDotProcessor extends AbstractProcessor {
public static final String TYPE = "dedot";
static final String DEFAULT_SEPARATOR = "_";
private final String separator;
DeDotProcessor(String tag, String separator) {
super(tag);
this.separator = separator;
}
public String getSeparator() {
return separator;
}
@Override
public void execute(IngestDocument document) {
deDot(document.getSourceAndMetadata());
}
@Override
public String getType() {
return TYPE;
}
/**
* Recursively iterates through Maps and Lists in search of map entries with
* keys containing dots. The dots in these fields are replaced with {@link #separator}.
*
* @param obj The current object in context to be checked for dots in its fields.
*/
private void deDot(Object obj) {
if (obj instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> doc = (Map) obj;
Iterator<Map.Entry<String, Object>> it = doc.entrySet().iterator();
Map<String, Object> deDottedFields = new HashMap<>();
while (it.hasNext()) {
Map.Entry<String, Object> entry = it.next();
deDot(entry.getValue());
String fieldName = entry.getKey();
if (fieldName.contains(".")) {
String deDottedFieldName = fieldName.replaceAll("\\.", separator);
deDottedFields.put(deDottedFieldName, entry.getValue());
it.remove();
}
}
doc.putAll(deDottedFields);
} else if (obj instanceof List) {
@SuppressWarnings("unchecked")
List<Object> list = (List) obj;
for (Object value : list) {
deDot(value);
}
}
}
public static class Factory extends AbstractProcessorFactory<DeDotProcessor> {
@Override
public DeDotProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String separator = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "separator");
if (separator == null) {
separator = DEFAULT_SEPARATOR;
}
return new DeDotProcessor(processorTag, separator);
}
}
}
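The core transformation of the removed DeDotProcessor is small enough to distill into a standalone, runnable sketch — recursively walk maps and lists and rewrite any key containing a dot (this is not the deleted class itself; replace(".", separator) is used instead of the original regex replaceAll for the same literal effect):

    // standalone sketch of the removed dedot behavior; needs java.util.{HashMap,Iterator,List,Map}
    static void deDot(Object obj, String separator) {
        if (obj instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, Object> map = (Map<String, Object>) obj;
            Map<String, Object> renamed = new HashMap<>();
            Iterator<Map.Entry<String, Object>> it = map.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Object> e = it.next();
                deDot(e.getValue(), separator);
                if (e.getKey().contains(".")) {
                    renamed.put(e.getKey().replace(".", separator), e.getValue());
                    it.remove();
                }
            }
            map.putAll(renamed);
        } else if (obj instanceof List) {
            for (Object v : (List<?>) obj) {
                deDot(v, separator);
            }
        }
    }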

View File

@ -28,7 +28,6 @@ import org.elasticsearch.ingest.core.TemplateService;
import org.elasticsearch.ingest.processor.AppendProcessor; import org.elasticsearch.ingest.processor.AppendProcessor;
import org.elasticsearch.ingest.processor.ConvertProcessor; import org.elasticsearch.ingest.processor.ConvertProcessor;
import org.elasticsearch.ingest.processor.DateProcessor; import org.elasticsearch.ingest.processor.DateProcessor;
import org.elasticsearch.ingest.processor.DeDotProcessor;
import org.elasticsearch.ingest.processor.FailProcessor; import org.elasticsearch.ingest.processor.FailProcessor;
import org.elasticsearch.ingest.processor.GsubProcessor; import org.elasticsearch.ingest.processor.GsubProcessor;
import org.elasticsearch.ingest.processor.JoinProcessor; import org.elasticsearch.ingest.processor.JoinProcessor;
@ -75,7 +74,6 @@ public class NodeModule extends AbstractModule {
registerProcessor(ConvertProcessor.TYPE, (templateService) -> new ConvertProcessor.Factory()); registerProcessor(ConvertProcessor.TYPE, (templateService) -> new ConvertProcessor.Factory());
registerProcessor(GsubProcessor.TYPE, (templateService) -> new GsubProcessor.Factory()); registerProcessor(GsubProcessor.TYPE, (templateService) -> new GsubProcessor.Factory());
registerProcessor(FailProcessor.TYPE, FailProcessor.Factory::new); registerProcessor(FailProcessor.TYPE, FailProcessor.Factory::new);
registerProcessor(DeDotProcessor.TYPE, (templateService) -> new DeDotProcessor.Factory());
} }
@Override @Override

View File

@ -247,8 +247,8 @@ public class InternalSettingsPreparer {
} }
if (secret) { if (secret) {
return new String(terminal.readSecret("Enter value for [%s]: ", key)); return new String(terminal.readSecret("Enter value for [" + key + "]: ", key));
} }
return terminal.readText("Enter value for [%s]: ", key); return terminal.readText("Enter value for [" + key + "]: ", key);
} }
} }

View File

@ -0,0 +1,398 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
/**
* A command for the plugin cli to install a plugin into elasticsearch.
*
* The install command takes a plugin id, which may be any of the following:
* <ul>
* <li>An official elasticsearch plugin name</li>
* <li>Maven coordinates to a plugin zip</li>
* <li>A URL to a plugin zip</li>
* </ul>
*
* Plugins are packaged as zip files. Each packaged plugin must contain a
* plugin properties file. See {@link PluginInfo}.
* <p>
* The installation process first extracts the plugin files into a temporary
* directory in order to verify the plugin satisfies the following requirements:
* <ul>
* <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
* <li>The plugin is not a module already provided with elasticsearch</li>
* <li>If the plugin contains extra security permissions, the policy file is validated</li>
* </ul>
* <p>
* A plugin may also contain an optional {@code bin} directory which contains scripts. The
* scripts will be installed into a subdirectory of the elasticsearch bin directory, using
* the name of the plugin, and the scripts will be marked executable.
* <p>
* A plugin may also contain an optional {@code config} directory which contains configuration
* files specific to the plugin. The config files will be installed into a subdirectory of the
* elasticsearch config directory, using the name of the plugin. If any files to be installed
* already exist, they will be skipped.
*/
class InstallPluginCommand extends CliTool.Command {
private static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging";
// TODO: make this a resource file generated by gradle
static final Set<String> MODULES = unmodifiableSet(newHashSet(
"lang-expression",
"lang-groovy"));
// TODO: make this a resource file generated by gradle
static final Set<String> OFFICIAL_PLUGINS = unmodifiableSet(newHashSet(
"analysis-icu",
"analysis-kuromoji",
"analysis-phonetic",
"analysis-smartcn",
"analysis-stempel",
"delete-by-query",
"discovery-azure",
"discovery-ec2",
"discovery-gce",
"lang-javascript",
"lang-painless",
"lang-python",
"mapper-attachments",
"mapper-murmur3",
"mapper-size",
"repository-azure",
"repository-hdfs",
"repository-s3",
"store-smb"));
private final String pluginId;
private final boolean batch;
InstallPluginCommand(Terminal terminal, String pluginId, boolean batch) {
super(terminal);
this.pluginId = pluginId;
this.batch = batch;
}
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
// TODO: remove this leniency!! is it needed anymore?
if (Files.exists(env.pluginsFile()) == false) {
terminal.println("Plugins directory [" + env.pluginsFile() + "] does not exist. Creating...");
Files.createDirectory(env.pluginsFile());
}
Path pluginZip = download(pluginId, env.tmpFile());
Path extractedZip = unzip(pluginZip, env.pluginsFile());
install(extractedZip, env);
return CliTool.ExitStatus.OK;
}
/** Downloads the plugin and returns the file it was downloaded to. */
private Path download(String pluginId, Path tmpDir) throws Exception {
if (OFFICIAL_PLUGINS.contains(pluginId)) {
final String version = Version.CURRENT.toString();
final String url;
if (System.getProperty(PROPERTY_SUPPORT_STAGING_URLS, "false").equals("true")) {
url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%1$s-%2$s/org/elasticsearch/plugin/%3$s/%1$s/%3$s-%1$s.zip",
version, Build.CURRENT.shortHash(), pluginId);
} else {
url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%1$s/%2$s/%1$s-%2$s.zip",
pluginId, version);
}
terminal.println("-> Downloading " + pluginId + " from elastic");
return downloadZipAndChecksum(url, tmpDir);
}
// now try as maven coordinates: three colon-separated segments and no slash (a valid URL would contain a slash)
String[] coordinates = pluginId.split(":");
if (coordinates.length == 3 && pluginId.contains("/") == false) {
String mavenUrl = String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%1$s/%2$s/%3$s/%2$s-%3$s.zip",
coordinates[0].replace(".", "/") /* groupId */, coordinates[1] /* artifactId */, coordinates[2] /* version */);
terminal.println("-> Downloading " + pluginId + " from maven central");
return downloadZipAndChecksum(mavenUrl, tmpDir);
}
// fall back to plain old URL
terminal.println("-> Downloading " + URLDecoder.decode(pluginId, "UTF-8"));
return downloadZip(pluginId, tmpDir);
}
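// Worked example (hypothetical version and build hash, for illustration only):
// for pluginId "analysis-icu" with Version.CURRENT = 5.0.0 and
// Build.CURRENT.shortHash() = "abc123", download() resolves one of:
//   staging: https://download.elastic.co/elasticsearch/staging/5.0.0-abc123/org/elasticsearch/plugin/analysis-icu/5.0.0/analysis-icu-5.0.0.zip
//   release: https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/analysis-icu/5.0.0/analysis-icu-5.0.0.zip
// and for maven coordinates "org.example:my-plugin:1.0":
//   https://repo1.maven.org/maven2/org/example/my-plugin/1.0/my-plugin-1.0.zip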
/** Downloads a zip from the url, into a temp file under the given temp dir. */
private Path downloadZip(String urlString, Path tmpDir) throws IOException {
URL url = new URL(urlString);
Path zip = Files.createTempFile(tmpDir, null, ".zip");
try (InputStream in = url.openStream()) {
// must overwrite since creating the temp file above actually created the file
Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING);
}
return zip;
}
/** Downloads a zip from the url, as well as a SHA1 checksum, and checks the checksum. */
private Path downloadZipAndChecksum(String urlString, Path tmpDir) throws Exception {
Path zip = downloadZip(urlString, tmpDir);
URL checksumUrl = new URL(urlString + ".sha1");
final String expectedChecksum;
try (InputStream in = checksumUrl.openStream()) {
BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
expectedChecksum = checksumReader.readLine();
if (checksumReader.readLine() != null) {
throw new UserError(CliTool.ExitStatus.IO_ERROR, "Invalid checksum file at " + checksumUrl);
}
}
byte[] zipbytes = Files.readAllBytes(zip);
String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes));
if (expectedChecksum.equals(gotChecksum) == false) {
throw new UserError(CliTool.ExitStatus.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum);
}
return zip;
}
private Path unzip(Path zip, Path pluginsDir) throws IOException {
// unzip plugin to a staging temp dir
Path target = Files.createTempDirectory(pluginsDir, ".installing-");
Files.createDirectories(target);
// TODO: we should wrap this in a try/catch and try deleting the target dir on failure?
try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {
ZipEntry entry;
byte[] buffer = new byte[8192];
while ((entry = zipInput.getNextEntry()) != null) {
Path targetFile = target.resolve(entry.getName());
// TODO: handle name being an absolute path
// be on the safe side: do not rely on directories always being extracted
// before their children (that ordering is common, but not guaranteed)
Files.createDirectories(targetFile.getParent());
if (entry.isDirectory() == false) {
try (OutputStream out = Files.newOutputStream(targetFile)) {
int len;
while((len = zipInput.read(buffer)) >= 0) {
out.write(buffer, 0, len);
}
}
}
zipInput.closeEntry();
}
}
return target;
}
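// A hedged sketch (not part of this commit) of the check the TODO inside unzip
// alludes to: resolve each entry name against the staging dir and reject anything
// that escapes it, covering absolute names as well as ".." segments. The helper
// name is hypothetical.
private static Path resolveZipEntrySafely(Path target, String entryName) throws IOException {
    Path resolved = target.resolve(entryName).normalize();
    if (resolved.startsWith(target) == false) {
        throw new IOException("Zip entry [" + entryName + "] escapes the extraction directory");
    }
    return resolved;
}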
/** Load information about the plugin, and verify it can be installed with no errors. */
private PluginInfo verify(Path pluginRoot, Environment env) throws Exception {
// read and validate the plugin descriptor
PluginInfo info = PluginInfo.readFromProperties(pluginRoot);
terminal.println(VERBOSE, info.toString());
// don't let the user install a plugin as a module...
// (modules may unavoidably be on maven central and are packaged up the same way)
if (MODULES.contains(info.getName())) {
throw new UserError(CliTool.ExitStatus.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
}
// check for jar hell before any copying
jarHellCheck(pluginRoot, env.pluginsFile(), info.isIsolated());
// read optional security policy (extra permissions)
// if it exists, confirm or warn the user
Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY);
if (Files.exists(policy)) {
PluginSecurity.readPolicy(policy, terminal, env, batch);
}
return info;
}
/** check a candidate plugin for jar hell before installing it */
private void jarHellCheck(Path candidate, Path pluginsDir, boolean isolated) throws Exception {
// create list of current jars in classpath
final List<URL> jars = new ArrayList<>();
jars.addAll(Arrays.asList(JarHell.parseClassPath()));
// read existing bundles. this does some checks on the installation too.
List<PluginsService.Bundle> bundles = PluginsService.getPluginBundles(pluginsDir);
// if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins
// that's always the first bundle
if (isolated == false) {
jars.addAll(bundles.get(0).urls);
}
// add plugin jars to the list
Path[] pluginJars = FileSystemUtils.files(candidate, "*.jar");
for (Path jar : pluginJars) {
jars.add(jar.toUri().toURL());
}
// TODO: no jars should be an error
// TODO: verify the classname exists in one of the jars!
// check combined (current classpath + new jars to-be-added)
JarHell.checkJarHell(jars.toArray(new URL[jars.size()]));
}
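// A minimal JDK-only sketch of what a jar hell check boils down to (illustrative,
// not the actual JarHell implementation): scan every jar on the combined classpath
// and fail as soon as two jars provide the same class file.
private static void duplicateClassCheckSketch(List<URL> jars) throws Exception {
    Set<String> seen = new HashSet<>();
    for (URL jar : jars) {
        try (java.util.jar.JarInputStream in = new java.util.jar.JarInputStream(jar.openStream())) {
            java.util.jar.JarEntry entry;
            while ((entry = in.getNextJarEntry()) != null) {
                if (entry.getName().endsWith(".class") && seen.add(entry.getName()) == false) {
                    throw new IllegalStateException("class " + entry.getName() + " exists in more than one jar");
                }
            }
        }
    }
}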
/**
* Installs the plugin from {@code tmpRoot} into the plugins dir.
* If the plugin has a bin dir and/or a config dir, those are copied.
*/
private void install(Path tmpRoot, Environment env) throws Exception {
List<Path> deleteOnFailure = new ArrayList<>();
deleteOnFailure.add(tmpRoot);
try {
PluginInfo info = verify(tmpRoot, env);
final Path destination = env.pluginsFile().resolve(info.getName());
if (Files.exists(destination)) {
throw new UserError(CliTool.ExitStatus.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command");
}
Path tmpBinDir = tmpRoot.resolve("bin");
if (Files.exists(tmpBinDir)) {
Path destBinDir = env.binFile().resolve(info.getName());
deleteOnFailure.add(destBinDir);
installBin(info, tmpBinDir, destBinDir);
}
Path tmpConfigDir = tmpRoot.resolve("config");
if (Files.exists(tmpConfigDir)) {
// some files may already exist, and we don't remove plugin config files on plugin removal,
// so any installed config files are left on failure too
installConfig(info, tmpConfigDir, env.configFile().resolve(info.getName()));
}
Files.move(tmpRoot, destination, StandardCopyOption.ATOMIC_MOVE);
terminal.println("-> Installed " + info.getName());
} catch (Exception installProblem) {
try {
IOUtils.rm(deleteOnFailure.toArray(new Path[0]));
} catch (IOException exceptionWhileRemovingFiles) {
installProblem.addSuppressed(exceptionWhileRemovingFiles);
}
throw installProblem;
}
}
/** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from the dest dir's parent. */
private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception {
if (Files.isDirectory(tmpBinDir) == false) {
throw new UserError(CliTool.ExitStatus.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory");
}
Files.createDirectory(destBinDir);
// set up file attributes for the installed files, copied from the parent dir
Set<PosixFilePermission> perms = new HashSet<>();
PosixFileAttributeView binAttrs = Files.getFileAttributeView(destBinDir.getParent(), PosixFileAttributeView.class);
if (binAttrs != null) {
perms = new HashSet<>(binAttrs.readAttributes().permissions());
// add the execute bits: they only mark the file as executable, and actual execution also requires read permission
perms.add(PosixFilePermission.OWNER_EXECUTE);
perms.add(PosixFilePermission.GROUP_EXECUTE);
perms.add(PosixFilePermission.OTHERS_EXECUTE);
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpBinDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName());
}
Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile));
Files.copy(srcFile, destFile);
if (perms.isEmpty() == false) {
PosixFileAttributeView view = Files.getFileAttributeView(destFile, PosixFileAttributeView.class);
view.setPermissions(perms);
}
}
}
IOUtils.rm(tmpBinDir); // clean up what we just copied
}
/**
* Copies the files from {@code tmpConfigDir} into {@code destConfigDir}.
* Any files existing in both the source and destination will be skipped.
*/
private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception {
if (Files.isDirectory(tmpConfigDir) == false) {
throw new UserError(CliTool.ExitStatus.IO_ERROR, "config in plugin " + info.getName() + " is not a directory");
}
// create the plugin's config dir if necessary
Files.createDirectories(destConfigDir);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpConfigDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName());
}
Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile));
if (Files.exists(destFile) == false) {
Files.copy(srcFile, destFile);
}
}
}
IOUtils.rm(tmpConfigDir); // clean up what we just copied
}
}
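The checksum handling in downloadZipAndChecksum above reduces to a few JDK calls. A minimal, self-contained sketch (the class name and argument handling are illustrative, not part of this commit):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class Sha1CheckSketch {
    // hex-encode the SHA-1 digest of a file, as MessageDigests.toHexString(sha1().digest(bytes)) does
    static String sha1Hex(Path file) throws Exception {
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(file));
        StringBuilder sb = new StringBuilder(2 * digest.length);
        for (byte b : digest) {
            sb.append(Character.forDigit((b >> 4) & 0xF, 16)).append(Character.forDigit(b & 0xF, 16));
        }
        return sb.toString();
    }

    public static void main(String[] args) throws Exception {
        Path zip = Paths.get(args[0]);     // the downloaded plugin zip
        String expected = args[1];         // the single line of the .sha1 file
        String got = sha1Hex(zip);
        if (expected.equals(got) == false) {
            throw new IllegalStateException("SHA1 mismatch, expected " + expected + " but got " + got);
        }
        System.out.println("checksum OK");
    }
}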
View File
@ -0,0 +1,56 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
/**
* A command for the plugin cli to list plugins installed in elasticsearch.
*/
class ListPluginsCommand extends CliTool.Command {
ListPluginsCommand(Terminal terminal) {
super(terminal);
}
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
if (Files.exists(env.pluginsFile()) == false) {
throw new IOException("Plugins directory missing: " + env.pluginsFile());
}
terminal.println(Terminal.Verbosity.VERBOSE, "Plugins directory: " + env.pluginsFile());
try (DirectoryStream<Path> stream = Files.newDirectoryStream(env.pluginsFile())) {
for (Path plugin : stream) {
terminal.println(plugin.getFileName().toString());
}
}
return CliTool.ExitStatus.OK;
}
}
View File
@ -0,0 +1,124 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.commons.cli.CommandLine;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.util.Locale;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.option;
/**
* A cli tool for adding, removing and listing plugins for elasticsearch.
*/
public class PluginCli extends CliTool {
// commands
private static final String LIST_CMD_NAME = "list";
private static final String INSTALL_CMD_NAME = "install";
private static final String REMOVE_CMD_NAME = "remove";
// usage config
private static final CliToolConfig.Cmd LIST_CMD = cmd(LIST_CMD_NAME, ListPluginsCommand.class).build();
private static final CliToolConfig.Cmd INSTALL_CMD = cmd(INSTALL_CMD_NAME, InstallPluginCommand.class)
.options(option("b", "batch").required(false))
.build();
private static final CliToolConfig.Cmd REMOVE_CMD = cmd(REMOVE_CMD_NAME, RemovePluginCommand.class).build();
static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginCli.class)
.cmds(LIST_CMD, INSTALL_CMD, REMOVE_CMD)
.build();
public static void main(String[] args) throws Exception {
// initialize default for es.logger.level because we will not read the logging.yml
String loggerLevel = System.getProperty("es.logger.level", "INFO");
// Set the appender for all potential log files to terminal so that other components that use the logger
// print to the same terminal.
// The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is
// executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch
// is run as a service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs.
// Therefore we print to Terminal.
Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder()
.put("appender.terminal.type", "terminal")
.put("rootLogger", "${es.logger.level}, terminal")
.put("es.logger.level", loggerLevel)
.build(), Terminal.DEFAULT);
// configure but do not read the logging conf file
LogConfigurator.configure(env.settings(), false);
int status = new PluginCli(Terminal.DEFAULT).execute(args).status();
exit(status);
}
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
private static void exit(int status) {
System.exit(status);
}
PluginCli(Terminal terminal) {
super(CONFIG, terminal);
}
@Override
protected Command parse(String cmdName, CommandLine cli) throws Exception {
switch (cmdName.toLowerCase(Locale.ROOT)) {
case LIST_CMD_NAME:
return new ListPluginsCommand(terminal);
case INSTALL_CMD_NAME:
return parseInstallPluginCommand(cli);
case REMOVE_CMD_NAME:
return parseRemovePluginCommand(cli);
default:
assert false : "can't get here as cmd name is validated before this method is called";
return exitCmd(ExitStatus.USAGE);
}
}
private Command parseInstallPluginCommand(CommandLine cli) {
String[] args = cli.getArgs();
if (args.length != 1) {
return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin id argument");
}
boolean batch = System.console() == null;
if (cli.hasOption("b")) {
batch = true;
}
return new InstallPluginCommand(terminal, args[0], batch);
}
private Command parseRemovePluginCommand(CommandLine cli) {
String[] args = cli.getArgs();
if (args.length != 1) {
return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin name argument");
}
return new RemovePluginCommand(terminal, args[0]);
}
}
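The batch flag parsing above is worth a standalone illustration. A hedged sketch, assuming commons-cli 1.3's DefaultParser (the class name BatchFlagSketch and the println are illustrative only):

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;

public class BatchFlagSketch {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("b", "batch", false, "skip interactive security confirmations");
        CommandLine cli = new DefaultParser().parse(options, args);
        // batch defaults to true when stdin is not an interactive console (e.g. piped input),
        // and the -b flag forces it on; this mirrors parseInstallPluginCommand above
        boolean batch = System.console() == null;
        if (cli.hasOption("b")) {
            batch = true;
        }
        System.out.println("batch mode: " + batch);
    }
}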
View File
@ -82,7 +82,6 @@ public class PluginInfo implements Streamable, ToXContent {
         if (name == null || name.isEmpty()) {
             throw new IllegalArgumentException("Property [name] is missing in [" + descriptor + "]");
         }
-        PluginManager.checkForForbiddenName(name);
         String description = props.getProperty("description");
         if (description == null) {
             throw new IllegalArgumentException("Property [description] is missing for plugin [" + name + "]");
View File
@ -1,686 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchCorruptionException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.http.client.HttpDownloadHelper;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.PluginsService.Bundle;
import java.io.IOException;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.DirectoryStream;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.GroupPrincipal;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.UserPrincipal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.stream.StreamSupport;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
import static org.elasticsearch.common.io.FileSystemUtils.moveFilesWithoutOverwriting;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
/**
*
*/
public class PluginManager {
public static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging";
public enum OutputMode {
DEFAULT, SILENT, VERBOSE
}
private static final Set<String> BLACKLIST = unmodifiableSet(newHashSet(
"elasticsearch",
"elasticsearch.bat",
"elasticsearch.in.sh",
"plugin",
"plugin.bat",
"service.bat"));
static final Set<String> MODULES = unmodifiableSet(newHashSet(
"lang-expression",
"lang-groovy"));
static final Set<String> OFFICIAL_PLUGINS = unmodifiableSet(newHashSet(
"analysis-icu",
"analysis-kuromoji",
"analysis-phonetic",
"analysis-smartcn",
"analysis-stempel",
"delete-by-query",
"discovery-azure",
"discovery-ec2",
"discovery-gce",
"discovery-multicast",
"ingest-geoip",
"lang-javascript",
"lang-painless",
"lang-python",
"mapper-attachments",
"mapper-murmur3",
"mapper-size",
"repository-azure",
"repository-hdfs",
"repository-s3",
"store-smb"));
private final Environment environment;
private URL url;
private OutputMode outputMode;
private TimeValue timeout;
public PluginManager(Environment environment, URL url, OutputMode outputMode, TimeValue timeout) {
this.environment = environment;
this.url = url;
this.outputMode = outputMode;
this.timeout = timeout;
}
public void downloadAndExtract(String name, Terminal terminal, boolean batch) throws IOException {
if (name == null && url == null) {
throw new IllegalArgumentException("plugin name or url must be supplied with install.");
}
if (!Files.exists(environment.pluginsFile())) {
terminal.println("Plugins directory [%s] does not exist. Creating...", environment.pluginsFile());
Files.createDirectory(environment.pluginsFile());
}
if (!Environment.isWritable(environment.pluginsFile())) {
throw new IOException("plugin directory " + environment.pluginsFile() + " is read only");
}
PluginHandle pluginHandle;
if (name != null) {
pluginHandle = PluginHandle.parse(name);
checkForForbiddenName(pluginHandle.name);
} else {
// if we have no name but a url, use a temporary name that will be overwritten later
pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null);
}
Path pluginFile = download(pluginHandle, terminal);
extract(pluginHandle, terminal, pluginFile, batch);
}
private Path download(PluginHandle pluginHandle, Terminal terminal) throws IOException {
Path pluginFile = pluginHandle.newDistroFile(environment);
HttpDownloadHelper downloadHelper = new HttpDownloadHelper();
boolean downloaded = false;
boolean verified = false;
HttpDownloadHelper.DownloadProgress progress;
if (outputMode == OutputMode.SILENT) {
progress = new HttpDownloadHelper.NullProgress();
} else {
progress = new HttpDownloadHelper.VerboseProgress(terminal.writer());
}
// first, try directly from the URL provided
if (url != null) {
URL pluginUrl = url;
boolean isSecureProtocol = "https".equalsIgnoreCase(pluginUrl.getProtocol());
boolean isAuthInfoSet = !Strings.isNullOrEmpty(pluginUrl.getUserInfo());
if (isAuthInfoSet && !isSecureProtocol) {
throw new IOException("Basic auth is only supported for HTTPS!");
}
terminal.println("Trying %s ...", pluginUrl.toExternalForm());
try {
downloadHelper.download(pluginUrl, pluginFile, progress, this.timeout);
downloaded = true;
terminal.println("Verifying %s checksums if available ...", pluginUrl.toExternalForm());
Tuple<URL, Path> sha1Info = pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "sha1");
verified = downloadHelper.downloadAndVerifyChecksum(sha1Info.v1(), pluginFile,
sha1Info.v2(), progress, this.timeout, HttpDownloadHelper.SHA1_CHECKSUM);
Tuple<URL, Path> md5Info = pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "md5");
verified = verified || downloadHelper.downloadAndVerifyChecksum(md5Info.v1(), pluginFile,
md5Info.v2(), progress, this.timeout, HttpDownloadHelper.MD5_CHECKSUM);
} catch (ElasticsearchTimeoutException | ElasticsearchCorruptionException e) {
throw e;
} catch (Exception e) {
// ignore
terminal.println("Failed: %s", ExceptionsHelper.detailedMessage(e));
}
} else {
if (PluginHandle.isOfficialPlugin(pluginHandle.name, pluginHandle.user, pluginHandle.version)) {
checkForOfficialPlugins(pluginHandle.name);
}
}
if (!downloaded && url == null) {
// We try all possible locations
for (URL url : pluginHandle.urls()) {
terminal.println("Trying %s ...", url.toExternalForm());
try {
downloadHelper.download(url, pluginFile, progress, this.timeout);
downloaded = true;
terminal.println("Verifying %s checksums if available ...", url.toExternalForm());
Tuple<URL, Path> sha1Info = pluginHandle.newChecksumUrlAndFile(environment, url, "sha1");
verified = downloadHelper.downloadAndVerifyChecksum(sha1Info.v1(), pluginFile,
sha1Info.v2(), progress, this.timeout, HttpDownloadHelper.SHA1_CHECKSUM);
Tuple<URL, Path> md5Info = pluginHandle.newChecksumUrlAndFile(environment, url, "md5");
verified = verified || downloadHelper.downloadAndVerifyChecksum(md5Info.v1(), pluginFile,
md5Info.v2(), progress, this.timeout, HttpDownloadHelper.MD5_CHECKSUM);
break;
} catch (ElasticsearchTimeoutException | ElasticsearchCorruptionException e) {
throw e;
} catch (Exception e) {
terminal.println(VERBOSE, "Failed: %s", ExceptionsHelper.detailedMessage(e));
}
}
}
if (!downloaded) {
// try to cleanup what we downloaded
IOUtils.deleteFilesIgnoringExceptions(pluginFile);
throw new IOException("failed to download out of all possible locations..., use --verbose to get detailed information");
}
if (verified == false) {
terminal.println("NOTE: Unable to verify checksum for downloaded plugin (unable to find .sha1 or .md5 file to verify)");
}
return pluginFile;
}
private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile, boolean batch) throws IOException {
// unzip plugin to a staging temp dir, named for the plugin
Path tmp = Files.createTempDirectory(environment.tmpFile(), null);
Path root = tmp.resolve(pluginHandle.name);
unzipPlugin(pluginFile, root);
// find the actual root (in case it's unzipped with extra directory wrapping)
root = findPluginRoot(root);
// read and validate the plugin descriptor
PluginInfo info = PluginInfo.readFromProperties(root);
terminal.println(VERBOSE, "%s", info);
// don't let the user install a plugin as a module...
// (modules may unavoidably be on maven central and are packaged up the same way)
if (MODULES.contains(info.getName())) {
throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
}
// update name in handle based on 'name' property found in descriptor file
pluginHandle = new PluginHandle(info.getName(), pluginHandle.version, pluginHandle.user);
final Path extractLocation = pluginHandle.extractedDir(environment);
if (Files.exists(extractLocation)) {
throw new IOException("plugin directory " + extractLocation.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + pluginHandle.name + "' command");
}
// check for jar hell before any copying
jarHellCheck(root, info.isIsolated());
// read optional security policy (extra permissions)
// if it exists, confirm or warn the user
Path policy = root.resolve(PluginInfo.ES_PLUGIN_POLICY);
if (Files.exists(policy)) {
PluginSecurity.readPolicy(policy, terminal, environment, batch);
}
// install plugin
FileSystemUtils.copyDirectoryRecursively(root, extractLocation);
terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath());
// cleanup
tryToDeletePath(terminal, tmp, pluginFile);
// take care of bin/ by moving and applying permissions if needed
Path sourcePluginBinDirectory = extractLocation.resolve("bin");
Path destPluginBinDirectory = pluginHandle.binDir(environment);
boolean needToCopyBinDirectory = Files.exists(sourcePluginBinDirectory);
if (needToCopyBinDirectory) {
if (Files.exists(destPluginBinDirectory) && !Files.isDirectory(destPluginBinDirectory)) {
tryToDeletePath(terminal, extractLocation);
throw new IOException("plugin bin directory " + destPluginBinDirectory + " is not a directory");
}
try {
copyBinDirectory(sourcePluginBinDirectory, destPluginBinDirectory, pluginHandle.name, terminal);
} catch (IOException e) {
// rollback and remove potentially before installed leftovers
terminal.printError("Error copying bin directory [%s] to [%s], cleaning up, reason: %s", sourcePluginBinDirectory, destPluginBinDirectory, ExceptionsHelper.detailedMessage(e));
tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment));
throw e;
}
}
Path sourceConfigDirectory = extractLocation.resolve("config");
Path destConfigDirectory = pluginHandle.configDir(environment);
boolean needToCopyConfigDirectory = Files.exists(sourceConfigDirectory);
if (needToCopyConfigDirectory) {
if (Files.exists(destConfigDirectory) && !Files.isDirectory(destConfigDirectory)) {
tryToDeletePath(terminal, extractLocation, destPluginBinDirectory);
throw new IOException("plugin config directory " + destConfigDirectory + " is not a directory");
}
try {
terminal.println(VERBOSE, "Found config, moving to %s", destConfigDirectory.toAbsolutePath());
moveFilesWithoutOverwriting(sourceConfigDirectory, destConfigDirectory, ".new");
if (Environment.getFileStore(destConfigDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) {
//We copy owner, group and permissions from the parent ES_CONFIG directory, assuming they were properly set depending
// on how es was installed in the first place: can be root:elasticsearch (750) if es was installed from rpm/deb packages
// or most likely elasticsearch:elasticsearch if installed from tar/zip. As for permissions we don't rely on umask.
PosixFileAttributes parentDirAttributes = Files.getFileAttributeView(destConfigDirectory.getParent(), PosixFileAttributeView.class).readAttributes();
//for files though, we make sure not to copy execute permissions from the parent dir and leave them untouched
Set<PosixFilePermission> baseFilePermissions = new HashSet<>();
for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) {
switch (posixFilePermission) {
case OWNER_EXECUTE:
case GROUP_EXECUTE:
case OTHERS_EXECUTE:
break;
default:
baseFilePermissions.add(posixFilePermission);
}
}
Files.walkFileTree(destConfigDirectory, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
Set<PosixFilePermission> newFilePermissions = new HashSet<>(baseFilePermissions);
Set<PosixFilePermission> currentFilePermissions = Files.getPosixFilePermissions(file);
for (PosixFilePermission posixFilePermission : currentFilePermissions) {
switch (posixFilePermission) {
case OWNER_EXECUTE:
case GROUP_EXECUTE:
case OTHERS_EXECUTE:
newFilePermissions.add(posixFilePermission);
}
}
setPosixFileAttributes(file, parentDirAttributes.owner(), parentDirAttributes.group(), newFilePermissions);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
setPosixFileAttributes(dir, parentDirAttributes.owner(), parentDirAttributes.group(), parentDirAttributes.permissions());
return FileVisitResult.CONTINUE;
}
});
} else {
terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
}
terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, destConfigDirectory.toAbsolutePath());
} catch (IOException e) {
terminal.printError("Error copying config directory [%s] to [%s], cleaning up, reason: %s", sourceConfigDirectory, destConfigDirectory, ExceptionsHelper.detailedMessage(e));
tryToDeletePath(terminal, extractLocation, destPluginBinDirectory, destConfigDirectory);
throw e;
}
}
}
private static void setPosixFileAttributes(Path path, UserPrincipal owner, GroupPrincipal group, Set<PosixFilePermission> permissions) throws IOException {
PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class);
fileAttributeView.setOwner(owner);
fileAttributeView.setGroup(group);
fileAttributeView.setPermissions(permissions);
}
static void tryToDeletePath(Terminal terminal, Path ... paths) {
for (Path path : paths) {
try {
IOUtils.rm(path);
} catch (IOException e) {
terminal.printError(e);
}
}
}
private void copyBinDirectory(Path sourcePluginBinDirectory, Path destPluginBinDirectory, String pluginName, Terminal terminal) throws IOException {
boolean canCopyFromSource = Files.exists(sourcePluginBinDirectory) && Files.isReadable(sourcePluginBinDirectory) && Files.isDirectory(sourcePluginBinDirectory);
if (canCopyFromSource) {
terminal.println(VERBOSE, "Found bin, moving to %s", destPluginBinDirectory.toAbsolutePath());
if (Files.exists(destPluginBinDirectory)) {
IOUtils.rm(destPluginBinDirectory);
}
try {
Files.createDirectories(destPluginBinDirectory.getParent());
FileSystemUtils.move(sourcePluginBinDirectory, destPluginBinDirectory);
} catch (IOException e) {
throw new IOException("Could not move [" + sourcePluginBinDirectory + "] to [" + destPluginBinDirectory + "]", e);
}
if (Environment.getFileStore(destPluginBinDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) {
PosixFileAttributes parentDirAttributes = Files.getFileAttributeView(destPluginBinDirectory.getParent(), PosixFileAttributeView.class).readAttributes();
//copy permissions from parent bin directory
Set<PosixFilePermission> filePermissions = new HashSet<>();
for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) {
switch (posixFilePermission) {
case OWNER_EXECUTE:
case GROUP_EXECUTE:
case OTHERS_EXECUTE:
break;
default:
filePermissions.add(posixFilePermission);
}
}
// add file execute permissions to existing perms, so execution will work.
filePermissions.add(PosixFilePermission.OWNER_EXECUTE);
filePermissions.add(PosixFilePermission.GROUP_EXECUTE);
filePermissions.add(PosixFilePermission.OTHERS_EXECUTE);
Files.walkFileTree(destPluginBinDirectory, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
setPosixFileAttributes(file, parentDirAttributes.owner(), parentDirAttributes.group(), filePermissions);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
setPosixFileAttributes(dir, parentDirAttributes.owner(), parentDirAttributes.group(), parentDirAttributes.permissions());
return FileVisitResult.CONTINUE;
}
});
} else {
terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
}
terminal.println(VERBOSE, "Installed %s into %s", pluginName, destPluginBinDirectory.toAbsolutePath());
}
}
/** We check whether we need to strip a top-level folder while extracting:
 * sometimes (e.g. github) the downloaded archive wraps the plugin in an extra top-level folder.
 */
private Path findPluginRoot(Path dir) throws IOException {
if (Files.exists(dir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) {
return dir;
} else {
final Path[] topLevelFiles = FileSystemUtils.files(dir);
if (topLevelFiles.length == 1 && Files.isDirectory(topLevelFiles[0])) {
Path subdir = topLevelFiles[0];
if (Files.exists(subdir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) {
return subdir;
}
}
}
throw new RuntimeException("Could not find plugin descriptor '" + PluginInfo.ES_PLUGIN_PROPERTIES + "' in plugin zip");
}
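// Example of the wrapping this handles (hypothetical layout): a GitHub archive
// for user/repo at tag v1.0 typically unzips to a single top-level folder
//   repo-1.0/   (containing the plugin descriptor and jars)
// so findPluginRoot descends into that one subdirectory when the descriptor is
// not found at the extraction root.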
/** check a candidate plugin for jar hell before installing it */
private void jarHellCheck(Path candidate, boolean isolated) throws IOException {
// create list of current jars in classpath
final List<URL> jars = new ArrayList<>();
jars.addAll(Arrays.asList(JarHell.parseClassPath()));
// read existing bundles. this does some checks on the installation too.
List<Bundle> bundles = PluginsService.getPluginBundles(environment.pluginsFile());
// if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins
// that's always the first bundle
if (isolated == false) {
jars.addAll(bundles.get(0).urls);
}
// add plugin jars to the list
Path[] pluginJars = FileSystemUtils.files(candidate, "*.jar");
for (Path jar : pluginJars) {
jars.add(jar.toUri().toURL());
}
// check combined (current classpath + new jars to-be-added)
try {
JarHell.checkJarHell(jars.toArray(new URL[jars.size()]));
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
private void unzipPlugin(Path zip, Path target) throws IOException {
Files.createDirectories(target);
try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {
ZipEntry entry;
byte[] buffer = new byte[8192];
while ((entry = zipInput.getNextEntry()) != null) {
Path targetFile = target.resolve(entry.getName());
// be on the safe side: do not rely on directories always being extracted
// before their children (that ordering is common, but not guaranteed)
Files.createDirectories(targetFile.getParent());
if (entry.isDirectory() == false) {
try (OutputStream out = Files.newOutputStream(targetFile)) {
int len;
while((len = zipInput.read(buffer)) >= 0) {
out.write(buffer, 0, len);
}
}
}
zipInput.closeEntry();
}
}
}
public void removePlugin(String name, Terminal terminal) throws IOException {
if (name == null) {
throw new IllegalArgumentException("plugin name must be supplied with remove [name].");
}
PluginHandle pluginHandle = PluginHandle.parse(name);
boolean removed = false;
checkForForbiddenName(pluginHandle.name);
Path pluginToDelete = pluginHandle.extractedDir(environment);
if (Files.exists(pluginToDelete)) {
terminal.println(VERBOSE, "Removing: %s", pluginToDelete);
try {
IOUtils.rm(pluginToDelete);
} catch (IOException ex){
throw new IOException("Unable to remove " + pluginHandle.name + ". Check file permissions on " +
pluginToDelete.toString(), ex);
}
removed = true;
}
Path binLocation = pluginHandle.binDir(environment);
if (Files.exists(binLocation)) {
terminal.println(VERBOSE, "Removing: %s", binLocation);
try {
IOUtils.rm(binLocation);
} catch (IOException ex){
throw new IOException("Unable to remove " + pluginHandle.name + ". Check file permissions on " +
binLocation.toString(), ex);
}
removed = true;
}
if (removed) {
terminal.println("Removed %s", name);
} else {
terminal.println("Plugin %s not found. Run \"plugin list\" to get list of installed plugins.", name);
}
}
static void checkForForbiddenName(String name) {
if (!hasLength(name) || BLACKLIST.contains(name.toLowerCase(Locale.ROOT))) {
throw new IllegalArgumentException("Illegal plugin name: " + name);
}
}
protected static void checkForOfficialPlugins(String name) {
// We make sure that users can use the new short naming for official plugins only
if (!OFFICIAL_PLUGINS.contains(name)) {
throw new IllegalArgumentException(name +
" is not an official plugin so you should install it using elasticsearch/" +
name + "/latest naming form.");
}
}
public Path[] getListInstalledPlugins() throws IOException {
if (!Files.exists(environment.pluginsFile())) {
return new Path[0];
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]);
}
}
public void listInstalledPlugins(Terminal terminal) throws IOException {
Path[] plugins = getListInstalledPlugins();
terminal.println("Installed plugins in %s:", environment.pluginsFile().toAbsolutePath());
if (plugins == null || plugins.length == 0) {
terminal.println(" - No plugin detected");
} else {
for (Path plugin : plugins) {
terminal.println(" - " + plugin.getFileName());
}
}
}
/**
 * Helper class to properly extract the user name, repository name, version and plugin name
 * from the plugin id given by a user.
*/
static class PluginHandle {
final String version;
final String user;
final String name;
PluginHandle(String name, String version, String user) {
this.version = version;
this.user = user;
this.name = name;
}
List<URL> urls() {
List<URL> urls = new ArrayList<>();
if (version != null) {
// Elasticsearch new download service uses groupId org.elasticsearch.plugin from 2.0.0
if (user == null) {
if (!Strings.isNullOrEmpty(System.getProperty(PROPERTY_SUPPORT_STAGING_URLS))) {
addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", version, Build.CURRENT.shortHash(), name, version, name, version));
}
addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", name, version, name, version));
} else {
// Elasticsearch old download service
addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/%1$s/%2$s/%2$s-%3$s.zip", user, name, version));
// Maven central repository
addUrl(urls, String.format(Locale.ROOT, "https://search.maven.org/remotecontent?filepath=%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version));
// Sonatype repository
addUrl(urls, String.format(Locale.ROOT, "https://oss.sonatype.org/service/local/repositories/releases/content/%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version));
// Github repository
addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/%3$s.zip", user, name, version));
}
}
if (user != null) {
// Github repository for master branch (assume a site plugin)
addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/master.zip", user, name));
}
return urls;
}
private static void addUrl(List<URL> urls, String url) {
try {
urls.add(new URL(url));
} catch (MalformedURLException e) {
// We simply ignore malformed URLs
}
}
Path newDistroFile(Environment env) throws IOException {
return Files.createTempFile(env.tmpFile(), name, ".zip");
}
Tuple<URL, Path> newChecksumUrlAndFile(Environment env, URL originalUrl, String suffix) throws IOException {
URL newUrl = new URL(originalUrl.toString() + "." + suffix);
return new Tuple<>(newUrl, Files.createTempFile(env.tmpFile(), name, ".zip." + suffix));
}
Path extractedDir(Environment env) {
return env.pluginsFile().resolve(name);
}
Path binDir(Environment env) {
return env.binFile().resolve(name);
}
Path configDir(Environment env) {
return env.configFile().resolve(name);
}
static PluginHandle parse(String name) {
String[] elements = name.split("/");
// We first consider the simplest form: pluginname
String repo = elements[0];
String user = null;
String version = null;
// We consider the form: username/pluginname
if (elements.length > 1) {
user = elements[0];
repo = elements[1];
// We consider the form: username/pluginname/version
if (elements.length > 2) {
version = elements[2];
}
}
if (isOfficialPlugin(repo, user, version)) {
return new PluginHandle(repo, Version.CURRENT.number(), null);
}
return new PluginHandle(repo, version, user);
}
static boolean isOfficialPlugin(String repo, String user, String version) {
return version == null && user == null && !Strings.isNullOrEmpty(repo);
}
}
}
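PluginHandle.parse above accepted three plugin-id forms. A hedged illustration of the outcomes (version shown as X.Y.Z since the official-plugin case pins it to Version.CURRENT of the running build; PluginHandle is package-private, so this only compiles from org.elasticsearch.plugins):

// "analysis-icu"               -> name=analysis-icu, version=X.Y.Z (Version.CURRENT), user=null
// "someuser/some-plugin"       -> name=some-plugin,  version=null,  user=someuser
// "someuser/some-plugin/1.0.0" -> name=some-plugin,  version=1.0.0, user=someuser
PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse("someuser/some-plugin/1.0.0");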
View File
@ -1,256 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.commons.cli.CommandLine;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.PluginManager.OutputMode;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Locale;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.option;
public class PluginManagerCliParser extends CliTool {
// By default timeout is 0 which means no timeout
public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMillis(0);
private static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginManagerCliParser.class)
.cmds(ListPlugins.CMD, Install.CMD, Remove.CMD)
.build();
public static void main(String[] args) {
// initialize default for es.logger.level because we will not read the logging.yml
String loggerLevel = System.getProperty("es.logger.level", "INFO");
// Set the appender for all potential log files to terminal so that other components that use the logger
// print to the same terminal.
// The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is
// executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch
// is run as a service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs.
// Therefore we print to Terminal.
Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder()
.put("appender.terminal.type", "terminal")
.put("rootLogger", "${es.logger.level}, terminal")
.put("es.logger.level", loggerLevel)
.build(), Terminal.DEFAULT);
// configure but do not read the logging conf file
LogConfigurator.configure(env.settings(), false);
int status = new PluginManagerCliParser().execute(args).status();
exit(status);
}
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
private static void exit(int status) {
System.exit(status);
}
public PluginManagerCliParser() {
super(CONFIG);
}
public PluginManagerCliParser(Terminal terminal) {
super(CONFIG, terminal);
}
@Override
protected Command parse(String cmdName, CommandLine cli) throws Exception {
switch (cmdName.toLowerCase(Locale.ROOT)) {
case Install.NAME:
return Install.parse(terminal, cli);
case ListPlugins.NAME:
return ListPlugins.parse(terminal, cli);
case Remove.NAME:
return Remove.parse(terminal, cli);
default:
assert false : "can't get here as cmd name is validated before this method is called";
return exitCmd(ExitStatus.USAGE);
}
}
/**
* List all installed plugins
*/
static class ListPlugins extends CliTool.Command {
private static final String NAME = "list";
private static final CliToolConfig.Cmd CMD = cmd(NAME, ListPlugins.class).build();
private final OutputMode outputMode;
public static Command parse(Terminal terminal, CommandLine cli) {
OutputMode outputMode = OutputMode.DEFAULT;
if (cli.hasOption("s")) {
outputMode = OutputMode.SILENT;
}
if (cli.hasOption("v")) {
outputMode = OutputMode.VERBOSE;
}
return new ListPlugins(terminal, outputMode);
}
ListPlugins(Terminal terminal, OutputMode outputMode) {
super(terminal);
this.outputMode = outputMode;
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
PluginManager pluginManager = new PluginManager(env, null, outputMode, DEFAULT_TIMEOUT);
pluginManager.listInstalledPlugins(terminal);
return ExitStatus.OK;
}
}
/**
* Remove a plugin
*/
static class Remove extends CliTool.Command {
private static final String NAME = "remove";
private static final CliToolConfig.Cmd CMD = cmd(NAME, Remove.class).build();
public static Command parse(Terminal terminal, CommandLine cli) {
String[] args = cli.getArgs();
if (args.length == 0) {
return exitCmd(ExitStatus.USAGE, terminal, "plugin name is missing (type -h for help)");
}
OutputMode outputMode = OutputMode.DEFAULT;
if (cli.hasOption("s")) {
outputMode = OutputMode.SILENT;
}
if (cli.hasOption("v")) {
outputMode = OutputMode.VERBOSE;
}
return new Remove(terminal, outputMode, args[0]);
}
private OutputMode outputMode;
final String pluginName;
Remove(Terminal terminal, OutputMode outputMode, String pluginToRemove) {
super(terminal);
this.outputMode = outputMode;
this.pluginName = pluginToRemove;
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
PluginManager pluginManager = new PluginManager(env, null, outputMode, DEFAULT_TIMEOUT);
terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "...");
pluginManager.removePlugin(pluginName, terminal);
return ExitStatus.OK;
}
}
/**
* Installs a plugin
*/
static class Install extends Command {
private static final String NAME = "install";
private static final CliToolConfig.Cmd CMD = cmd(NAME, Install.class)
.options(option("t", "timeout").required(false).hasArg(false))
.options(option("b", "batch").required(false))
.build();
static Command parse(Terminal terminal, CommandLine cli) {
String[] args = cli.getArgs();
// install [plugin-name/url]
if ((args == null) || (args.length == 0)) {
return exitCmd(ExitStatus.USAGE, terminal, "plugin name or url is missing (type -h for help)");
}
String name = args[0];
URL optionalPluginUrl = null;
// try parsing cli argument as URL
try {
optionalPluginUrl = new URL(name);
name = null;
} catch (MalformedURLException e) {
// we tried to parse the cli argument as url and failed
// continue treating it as a symbolic plugin name like `analysis-icu` etc.
}
TimeValue timeout = TimeValue.parseTimeValue(cli.getOptionValue("t"), DEFAULT_TIMEOUT, "cli");
OutputMode outputMode = OutputMode.DEFAULT;
if (cli.hasOption("s")) {
outputMode = OutputMode.SILENT;
}
if (cli.hasOption("v")) {
outputMode = OutputMode.VERBOSE;
}
boolean batch = System.console() == null;
if (cli.hasOption("b")) {
batch = true;
}
return new Install(terminal, name, outputMode, optionalPluginUrl, timeout, batch);
}
final String name;
private OutputMode outputMode;
final URL url;
final TimeValue timeout;
final boolean batch;
Install(Terminal terminal, String name, OutputMode outputMode, URL url, TimeValue timeout, boolean batch) {
super(terminal);
this.name = name;
this.outputMode = outputMode;
this.url = url;
this.timeout = timeout;
this.batch = batch;
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
PluginManager pluginManager = new PluginManager(env, url, outputMode, timeout);
if (name != null) {
terminal.println("-> Installing " + Strings.coalesceToEmpty(name) + "...");
} else {
terminal.println("-> Installing from " + URLDecoder.decode(url.toString(), "UTF-8") + "...");
}
pluginManager.downloadAndExtract(name, terminal, batch);
return ExitStatus.OK;
}
}
}
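Install.parse above disambiguates a URL from a plugin name by simply attempting to parse the argument. A self-contained sketch of that pattern (class name and output are illustrative):

import java.net.MalformedURLException;
import java.net.URL;

public class PluginArgSketch {
    public static void main(String[] args) {
        String name = args.length > 0 ? args[0] : "analysis-icu";
        URL url = null;
        try {
            // a well-formed URL wins and the symbolic name is cleared ...
            url = new URL(name);
            name = null;
        } catch (MalformedURLException e) {
            // ... otherwise keep treating the argument as a plugin name like "analysis-icu"
        }
        System.out.println(url != null ? "install from " + url : "install plugin " + name);
    }
}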
View File
@ -19,6 +19,7 @@
 package org.elasticsearch.plugins;

+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.cli.Terminal;
 import org.elasticsearch.common.cli.Terminal.Verbosity;
 import org.elasticsearch.env.Environment;
@ -86,7 +87,7 @@ class PluginSecurity {
         terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@");
         // print all permissions:
         for (Permission permission : requested) {
-            terminal.println(Verbosity.NORMAL, "* %s", formatPermission(permission));
+            terminal.println(Verbosity.NORMAL, "* " + formatPermission(permission));
         }
         terminal.println(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html");
         terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks.");
@ -151,7 +152,7 @@ class PluginSecurity {
         } catch (NoSuchAlgorithmException e) {
             throw new RuntimeException(e);
         }
-        PluginManager.tryToDeletePath(terminal, emptyPolicyFile);
+        IOUtils.rm(emptyPolicyFile);
         // parse the plugin's policy file into a set of permissions
         final Policy policy;
View File
@ -0,0 +1,78 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
/**
* A command for the plugin cli to remove a plugin from elasticsearch.
*/
class RemovePluginCommand extends CliTool.Command {
private final String pluginName;
public RemovePluginCommand(Terminal terminal, String pluginName) {
super(terminal);
this.pluginName = pluginName;
}
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "...");
Path pluginDir = env.pluginsFile().resolve(pluginName);
if (Files.exists(pluginDir) == false) {
throw new UserError(CliTool.ExitStatus.USAGE, "Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins.");
}
List<Path> pluginPaths = new ArrayList<>();
Path pluginBinDir = env.binFile().resolve(pluginName);
if (Files.exists(pluginBinDir)) {
if (Files.isDirectory(pluginBinDir) == false) {
throw new UserError(CliTool.ExitStatus.IO_ERROR, "Bin dir for " + pluginName + " is not a directory");
}
pluginPaths.add(pluginBinDir);
terminal.println(VERBOSE, "Removing: " + pluginBinDir);
}
terminal.println(VERBOSE, "Removing: " + pluginDir);
Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
pluginPaths.add(tmpPluginDir);
IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()]));
return CliTool.ExitStatus.OK;
}
}
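The removal above renames the plugin directory before deleting it, so a concurrently starting node never observes a half-deleted plugin. A JDK-only sketch of that pattern (the recursive delete stands in for Lucene's IOUtils.rm; the class name is illustrative):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;

public class RemoveSketch {
    public static void main(String[] args) throws IOException {
        Path pluginDir = Paths.get(args[0]);
        // atomically move the directory aside first, then delete the renamed copy
        Path tmp = pluginDir.resolveSibling(".removing-" + pluginDir.getFileName());
        Files.move(pluginDir, tmp, StandardCopyOption.ATOMIC_MOVE);
        Files.walkFileTree(tmp, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Files.delete(file);
                return FileVisitResult.CONTINUE;
            }
            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                Files.delete(dir);
                return FileVisitResult.CONTINUE;
            }
        });
    }
}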
View File
@ -20,13 +20,9 @@
 package org.elasticsearch.rest.action.ingest;

 import org.elasticsearch.action.ingest.PutPipelineRequest;
-import org.elasticsearch.action.ingest.WritePipelineResponse;
-import org.elasticsearch.action.ingest.WritePipelineResponseRestListener;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
@ -34,7 +30,6 @@ import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
 import org.elasticsearch.rest.action.support.RestActions;

-import java.io.IOException;

 public class RestPutPipelineAction extends BaseRestHandler {
@ -49,7 +44,7 @@ public class RestPutPipelineAction extends BaseRestHandler {
         PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), RestActions.getRestContent(restRequest));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
-        client.admin().cluster().putPipeline(request, new WritePipelineResponseRestListener(channel));
+        client.admin().cluster().putPipeline(request, new AcknowledgedRestListener<>(channel));
     }
 }

View File

@ -47,6 +47,6 @@ public class RestSimulatePipelineAction extends BaseRestHandler {
SimulatePipelineRequest request = new SimulatePipelineRequest(RestActions.getRestContent(restRequest)); SimulatePipelineRequest request = new SimulatePipelineRequest(RestActions.getRestContent(restRequest));
request.setId(restRequest.param("id")); request.setId(restRequest.param("id"));
request.setVerbose(restRequest.paramAsBoolean("verbose", false)); request.setVerbose(restRequest.paramAsBoolean("verbose", false));
client.admin().cluster().simulatePipeline(request, new RestStatusToXContentListener<>(channel)); client.admin().cluster().simulatePipeline(request, new RestToXContentListener<>(channel));
} }
} }

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException; import java.io.IOException;
import java.util.function.Supplier;
/** /**
* Base class for delegating transport response to a transport channel * Base class for delegating transport response to a transport channel
@ -30,7 +31,7 @@ import java.io.IOException;
public abstract class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> { public abstract class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
/** /**
* Convenience method for delegating an empty response to the provided changed * Convenience method for delegating an empty response to the provided transport channel
*/ */
public static TransportChannelResponseHandler<TransportResponse.Empty> emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { public static TransportChannelResponseHandler<TransportResponse.Empty> emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) {
return new TransportChannelResponseHandler<TransportResponse.Empty>(logger, channel, extraInfoOnError) { return new TransportChannelResponseHandler<TransportResponse.Empty>(logger, channel, extraInfoOnError) {
@ -41,6 +42,19 @@ public abstract class TransportChannelResponseHandler<T extends TransportRespons
}; };
} }
/**
* Convenience method for delegating a response provided by supplier to the provided transport channel
*/
public static <T extends TransportResponse> TransportChannelResponseHandler<T> responseHandler(ESLogger logger, Supplier<T> responseSupplier, TransportChannel channel, String extraInfoOnError) {
return new TransportChannelResponseHandler<T>(logger, channel, extraInfoOnError) {
@Override
public T newInstance() {
return responseSupplier.get();
}
};
}
private final ESLogger logger; private final ESLogger logger;
private final TransportChannel channel; private final TransportChannel channel;
private final String extraInfoOnError; private final String extraInfoOnError;
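
The new responseHandler factory above replaces one-off anonymous subclasses (each overriding newInstance()) with a constructor reference passed as a Supplier. The shape of that refactoring in isolation, using a hypothetical Handler type rather than the Elasticsearch classes:

import java.util.function.Supplier;

abstract class Handler<T> {
    // Subclasses used to override this to produce a fresh response object.
    abstract T newInstance();

    // Supplier-based factory: callers pass a constructor reference instead of subclassing.
    static <T> Handler<T> of(Supplier<T> supplier) {
        return new Handler<T>() {
            @Override
            T newInstance() {
                return supplier.get();
            }
        };
    }
}

class HandlerDemo {
    public static void main(String[] args) {
        Handler<StringBuilder> handler = Handler.of(StringBuilder::new);
        System.out.println(handler.newInstance().append("fresh instance per call"));
    }
}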

View File

@ -31,6 +31,7 @@ import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.TransportAddress;
@ -39,8 +40,8 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException; import java.io.IOException;
@ -56,6 +57,8 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function; import java.util.function.Function;
import java.util.function.Supplier; import java.util.function.Supplier;
import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.listSetting;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/** /**
@ -92,9 +95,10 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
// tracer log // tracer log
public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER); public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING = listSetting("transport.tracer.include", emptyList(),
public static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Setting.Scope.CLUSTER); Function.identity(), true, Scope.CLUSTER);
public static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING = listSetting("transport.tracer.exclude",
Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Scope.CLUSTER);
private final ESLogger tracerLog; private final ESLogger tracerLog;
@ -757,7 +761,8 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
final TransportServiceAdapter adapter; final TransportServiceAdapter adapter;
final ThreadPool threadPool; final ThreadPool threadPool;
public DirectResponseChannel(ESLogger logger, DiscoveryNode localNode, String action, long requestId, TransportServiceAdapter adapter, ThreadPool threadPool) { public DirectResponseChannel(ESLogger logger, DiscoveryNode localNode, String action, long requestId,
TransportServiceAdapter adapter, ThreadPool threadPool) {
this.logger = logger; this.logger = logger;
this.localNode = localNode; this.localNode = localNode;
this.action = action; this.action = action;
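
The TRACE_LOG_INCLUDE_SETTING / TRACE_LOG_EXCLUDE_SETTING pair above feeds a wildcard filter over action names (hence the Regex import in the same file); the default exclude keeps the chatty fault-detection pings out of the trace log. A self-contained sketch of that style of include/exclude filter, with a minimal '*' matcher standing in for the real matching:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class TraceFilter {
    // Trace an action if it matches some include pattern (or includes are empty)
    // and matches no exclude pattern.
    static boolean shouldTrace(String action, List<String> include, List<String> exclude) {
        boolean included = include.isEmpty() || include.stream().anyMatch(p -> matches(p, action));
        boolean excluded = exclude.stream().anyMatch(p -> matches(p, action));
        return included && !excluded;
    }

    // Minimal '*' wildcard match; the real code uses Elasticsearch's Regex utility.
    static boolean matches(String pattern, String value) {
        return value.matches(pattern.replace(".", "\\.").replace("*", ".*"));
    }

    public static void main(String[] args) {
        List<String> exclude = Arrays.asList("internal:discovery/zen/fd*");
        System.out.println(shouldTrace("internal:discovery/zen/fd/ping", Collections.emptyList(), exclude)); // false
        System.out.println(shouldTrace("indices:data/write/bulk", Collections.emptyList(), exclude));        // true
    }
}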

View File

@ -19,11 +19,15 @@
package org.elasticsearch.transport; package org.elasticsearch.transport;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import java.util.List; import java.util.List;
import static java.util.Collections.emptyList; import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.groupSetting;
import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.settings.Setting.listSetting;
/** /**
* a collection of settings related to transport components, which are also needed in org.elasticsearch.bootstrap.Security * a collection of settings related to transport components, which are also needed in org.elasticsearch.bootstrap.Security
@ -31,13 +35,13 @@ import static java.util.Collections.emptyList;
*/ */
public final class TransportSettings { public final class TransportSettings {
public static final Setting<List<String>> HOST = Setting.listSetting("transport.host", emptyList(), s -> s, false, Setting.Scope.CLUSTER); public static final Setting<List<String>> HOST = listSetting("transport.host", emptyList(), s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> PUBLISH_HOST = Setting.listSetting("transport.publish_host", HOST, s -> s, false, Setting.Scope.CLUSTER); public static final Setting<List<String>> PUBLISH_HOST = listSetting("transport.publish_host", HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> BIND_HOST = Setting.listSetting("transport.bind_host", HOST, s -> s, false, Setting.Scope.CLUSTER); public static final Setting<List<String>> BIND_HOST = listSetting("transport.bind_host", HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<String> PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Setting.Scope.CLUSTER); public static final Setting<String> PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Scope.CLUSTER);
public static final Setting<Integer> PUBLISH_PORT = Setting.intSetting("transport.publish_port", -1, -1, false, Setting.Scope.CLUSTER); public static final Setting<Integer> PUBLISH_PORT = intSetting("transport.publish_port", -1, -1, false, Scope.CLUSTER);
public static final String DEFAULT_PROFILE = "default"; public static final String DEFAULT_PROFILE = "default";
public static final Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER); public static final Setting<Settings> TRANSPORT_PROFILES_SETTING = groupSetting("transport.profiles.", true, Scope.CLUSTER);
private TransportSettings() { private TransportSettings() {
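
Note the fallback wiring above: PUBLISH_HOST and BIND_HOST are both declared with HOST as their fallback, so each inherits transport.host when not set explicitly. A toy sketch of that resolution order, assuming a flat string map rather than the Setting API:

import java.util.HashMap;
import java.util.Map;

class FallbackResolution {
    // Resolve key, then fallbackKey, then the hard default, mirroring how
    // transport.publish_host falls back to transport.host.
    static String resolve(Map<String, String> config, String key, String fallbackKey, String hardDefault) {
        String value = config.get(key);
        if (value != null) {
            return value;
        }
        String fallback = config.get(fallbackKey);
        return fallback != null ? fallback : hardDefault;
    }

    public static void main(String[] args) {
        Map<String, String> config = new HashMap<>();
        config.put("transport.host", "10.0.0.1");
        // prints 10.0.0.1: publish_host is unset, so it inherits transport.host
        System.out.println(resolve(config, "transport.publish_host", "transport.host", "0.0.0.0"));
    }
}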

View File

@ -97,7 +97,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1); int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize); logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX); final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX);
this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory, threadPool.getThreadContext()); this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory,
threadPool.getThreadContext());
this.namedWriteableRegistry = namedWriteableRegistry; this.namedWriteableRegistry = namedWriteableRegistry;
} }
@ -199,7 +200,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
} }
@Override @Override
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request,
TransportRequestOptions options) throws IOException, TransportException {
final Version version = Version.smallest(node.version(), this.version); final Version version = Version.smallest(node.version(), this.version);
try (BytesStreamOutput stream = new BytesStreamOutput()) { try (BytesStreamOutput stream = new BytesStreamOutput()) {
@ -237,7 +239,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
return this.workers; return this.workers;
} }
protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version, @Nullable final Long sendRequestId) { protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version,
@Nullable final Long sendRequestId) {
Transports.assertTransportThread(); Transports.assertTransportThread();
try { try {
transportServiceAdapter.received(data.length); transportServiceAdapter.received(data.length);
@ -278,7 +281,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
stream = new NamedWriteableAwareStreamInput(stream, namedWriteableRegistry); stream = new NamedWriteableAwareStreamInput(stream, namedWriteableRegistry);
final String action = stream.readString(); final String action = stream.readString();
transportServiceAdapter.onRequestReceived(requestId, action); transportServiceAdapter.onRequestReceived(requestId, action);
final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action, requestId, version); final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action,
requestId, version);
try { try {
final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action);
if (reg == null) { if (reg == null) {
@ -334,7 +338,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
try { try {
response.readFrom(buffer); response.readFrom(buffer);
} catch (Throwable e) { } catch (Throwable e) {
handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); handleException(handler, new TransportSerializationException(
"Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
return; return;
} }
handleParsedResponse(response, handler); handleParsedResponse(response, handler);
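
One detail worth keeping in mind in sendRequest above: even though sender and receiver share a JVM, the request is still serialized into a BytesStreamOutput and re-read on the receiving side, so the local transport exercises the same wire semantics as a remote one. A tiny round-trip sketch of that idea, using plain JDK serialization in place of Elasticsearch's stream classes:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

class LocalLoopback {
    // Write the message to bytes even for an in-JVM call...
    static byte[] write(Serializable message) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(message);
        }
        return bytes.toByteArray();
    }

    // ...and read it back, so local calls hit the same (de)serialization paths as remote ones.
    static Object read(byte[] data) throws IOException, ClassNotFoundException {
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(data))) {
            return in.readObject();
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(read(write("ping"))); // round-trips through bytes like a remote call
    }
}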

View File

@ -46,7 +46,8 @@ public class LocalTransportChannel implements TransportChannel {
private final long requestId; private final long requestId;
private final Version version; private final Version version;
public LocalTransportChannel(LocalTransport sourceTransport, TransportServiceAdapter sourceTransportServiceAdapter, LocalTransport targetTransport, String action, long requestId, Version version) { public LocalTransportChannel(LocalTransport sourceTransport, TransportServiceAdapter sourceTransportServiceAdapter,
LocalTransport targetTransport, String action, long requestId, Version version) {
this.sourceTransport = sourceTransport; this.sourceTransport = sourceTransport;
this.sourceTransportServiceAdapter = sourceTransportServiceAdapter; this.sourceTransportServiceAdapter = sourceTransportServiceAdapter;
this.targetTransport = targetTransport; this.targetTransport = targetTransport;
@ -94,7 +95,8 @@ public class LocalTransportChannel implements TransportChannel {
public void sendResponse(Throwable error) throws IOException { public void sendResponse(Throwable error) throws IOException {
BytesStreamOutput stream = new BytesStreamOutput(); BytesStreamOutput stream = new BytesStreamOutput();
writeResponseExceptionHeader(stream); writeResponseExceptionHeader(stream);
RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddresses()[0], action, error); RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(),
targetTransport.boundAddress().boundAddresses()[0], action, error);
stream.writeThrowable(tx); stream.writeThrowable(tx);
final byte[] data = stream.bytes().toBytes(); final byte[] data = stream.bytes().toBytes();

View File

@ -116,7 +116,9 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
} catch (NotCompressedException ex) { } catch (NotCompressedException ex) {
int maxToRead = Math.min(buffer.readableBytes(), 10); int maxToRead = Math.min(buffer.readableBytes(), 10);
int offset = buffer.readerIndex(); int offset = buffer.readerIndex();
StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes()).append("] readable bytes with message size [").append(size).append("] ").append("] are ["); StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead)
.append("] content bytes out of [").append(buffer.readableBytes())
.append("] readable bytes with message size [").append(size).append("] ").append("] are [");
for (int i = 0; i < maxToRead; i++) { for (int i = 0; i < maxToRead; i++) {
sb.append(buffer.getByte(offset + i)).append(","); sb.append(buffer.getByte(offset + i)).append(",");
} }
@ -134,15 +136,17 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
final int nextByte = streamIn.read(); final int nextByte = streamIn.read();
// calling read() is useful to make sure the message is fully read, even if there is some kind of EOS marker // calling read() is useful to make sure the message is fully read, even if there is some kind of EOS marker
if (nextByte != -1) { if (nextByte != -1) {
throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action
+ action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting"); + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
} }
if (buffer.readerIndex() < expectedIndexReader) { if (buffer.readerIndex() < expectedIndexReader) {
throw new IllegalStateException("Message is fully read (request), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting"); throw new IllegalStateException("Message is fully read (request), yet there are "
+ (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
} }
if (buffer.readerIndex() > expectedIndexReader) { if (buffer.readerIndex() > expectedIndexReader) {
throw new IllegalStateException("Message read past expected size (request) for requestId [" + requestId + "], action [" throw new IllegalStateException(
+ action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting"); "Message read past expected size (request) for requestId [" + requestId + "], action [" + action
+ "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
} }
} else { } else {
@ -163,11 +167,12 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
+ handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); + handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
} }
if (buffer.readerIndex() < expectedIndexReader) { if (buffer.readerIndex() < expectedIndexReader) {
throw new IllegalStateException("Message is fully read (response), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting"); throw new IllegalStateException("Message is fully read (response), yet there are "
+ (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
} }
if (buffer.readerIndex() > expectedIndexReader) { if (buffer.readerIndex() > expectedIndexReader) {
throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId + "], handler [" throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId
+ handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); + "], handler [" + handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
} }
} }
@ -193,7 +198,8 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
try { try {
response.readFrom(buffer); response.readFrom(buffer);
} catch (Throwable e) { } catch (Throwable e) {
handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); handleException(handler, new TransportSerializationException(
"Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
return; return;
} }
try { try {
@ -247,7 +253,8 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
buffer = new NamedWriteableAwareStreamInput(buffer, transport.namedWriteableRegistry); buffer = new NamedWriteableAwareStreamInput(buffer, transport.namedWriteableRegistry);
final String action = buffer.readString(); final String action = buffer.readString();
transportServiceAdapter.onRequestReceived(requestId, action); transportServiceAdapter.onRequestReceived(requestId, action);
final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, profileName); final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel,
requestId, version, profileName);
try { try {
final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action);
if (reg == null) { if (reg == null) {
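
All the IllegalStateException guards in this handler enforce a single invariant: once a message has been deserialized, the reader index must sit exactly at the declared end of the frame; both leftover bytes and over-reads indicate a framing or (de)serialization bug and reset the connection. The check in isolation, against a plain ByteBuffer:

import java.nio.ByteBuffer;

class FrameReadCheck {
    // After handling a frame, the reader must be exactly at start + declaredSize.
    static void checkFullyRead(ByteBuffer buffer, int start, int declaredSize, String what) {
        int expected = start + declaredSize;
        if (buffer.position() < expected) {
            throw new IllegalStateException("Message is fully read (" + what + "), yet there are "
                    + (expected - buffer.position()) + " remaining bytes; resetting");
        }
        if (buffer.position() > expected) {
            throw new IllegalStateException("Message read past expected size (" + what + "); resetting");
        }
    }
}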

View File

@ -42,6 +42,7 @@ import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkService.TcpSettings; import org.elasticsearch.common.network.NetworkService.TcpSettings;
import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress;
@ -119,6 +120,10 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.settings.Setting.boolSetting;
import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.settings.Setting.timeSetting;
import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException;
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException; import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException;
@ -143,21 +148,33 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss";
public static final Setting<Integer> WORKER_COUNT = new Setting<>("transport.netty.worker_count", public static final Setting<Integer> WORKER_COUNT = new Setting<>("transport.netty.worker_count",
(s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
false, Setting.Scope.CLUSTER); (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY = Setting.intSetting("transport.connections_per_node.recovery", 2, 1, false, Setting.Scope.CLUSTER); public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, false,
public static final Setting<Integer> CONNECTIONS_PER_NODE_BULK = Setting.intSetting("transport.connections_per_node.bulk", 3, 1, false, Setting.Scope.CLUSTER); Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_REG = Setting.intSetting("transport.connections_per_node.reg", 6, 1, false, Setting.Scope.CLUSTER); public static final Setting<Integer> CONNECTIONS_PER_NODE_BULK = intSetting("transport.connections_per_node.bulk", 3, 1, false,
public static final Setting<Integer> CONNECTIONS_PER_NODE_STATE = Setting.intSetting("transport.connections_per_node.state", 1, 1, false, Setting.Scope.CLUSTER); Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_PING = Setting.intSetting("transport.connections_per_node.ping", 1, 1, false, Setting.Scope.CLUSTER); public static final Setting<Integer> CONNECTIONS_PER_NODE_REG = intSetting("transport.connections_per_node.reg", 6, 1, false,
Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_STATE = intSetting("transport.connections_per_node.state", 1, 1, false,
Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_PING = intSetting("transport.connections_per_node.ping", 1, 1, false,
Scope.CLUSTER);
// the scheduled internal ping interval setting, defaults to disabled (-1) // the scheduled internal ping interval setting, defaults to disabled (-1)
public static final Setting<TimeValue> PING_SCHEDULE = Setting.timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER); public static final Setting<TimeValue> PING_SCHEDULE = timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false,
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, false, Setting.Scope.CLUSTER); Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER); public static final Setting<Boolean> TCP_BLOCKING_CLIENT = boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT,
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, Setting.Scope.CLUSTER); false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, Setting.Scope.CLUSTER); public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = timeSetting("transport.tcp.connect_timeout",
public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, false, Setting.Scope.CLUSTER); TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, false, Setting.Scope.CLUSTER); public static final Setting<Boolean> TCP_NO_DELAY = boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER,
false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS,
false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER); public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
@ -165,9 +182,9 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER); public static final Setting<Integer> NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER);
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for Netty's default values; we can use higher ones, even a fixed size // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for Netty's default values; we can use higher ones, even a fixed size
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting("transport.netty.receive_predictor_size", public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
"transport.netty.receive_predictor_size",
settings -> { settings -> {
long defaultReceiverPredictor = 512 * 1024; long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@ -177,10 +194,11 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
} }
return new ByteSizeValue(defaultReceiverPredictor).toString(); return new ByteSizeValue(defaultReceiverPredictor).toString();
}, false, Setting.Scope.CLUSTER); }, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MIN = Setting.byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("transport.netty.receive_predictor_min",
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = Setting.byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER);
public static final Setting<Integer> NETTY_BOSS_COUNT = Setting.intSetting("transport.netty.boss_count", 1, 1, false, Setting.Scope.CLUSTER); public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("transport.netty.receive_predictor_max",
NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER);
public static final Setting<Integer> NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, false, Scope.CLUSTER);
protected final NetworkService networkService; protected final NetworkService networkService;
protected final Version version; protected final Version version;
@ -226,7 +244,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
final ScheduledPing scheduledPing; final ScheduledPing scheduledPing;
@Inject @Inject
public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version, NamedWriteableRegistry namedWriteableRegistry) { public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version,
NamedWriteableRegistry namedWriteableRegistry) {
super(settings); super(settings);
this.threadPool = threadPool; this.threadPool = threadPool;
this.networkService = networkService; this.networkService = networkService;
@ -252,7 +271,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) { if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes()); receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
} else { } else {
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes()); receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(),
(int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
} }
this.scheduledPing = new ScheduledPing(); this.scheduledPing = new ScheduledPing();
@ -305,7 +325,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
String name = entry.getKey(); String name = entry.getKey();
if (!Strings.hasLength(name)) { if (!Strings.hasLength(name)) {
logger.info("transport profile configured without a name. skipping profile with settings [{}]", profileSettings.toDelimitedString(',')); logger.info("transport profile configured without a name. skipping profile with settings [{}]",
profileSettings.toDelimitedString(','));
continue; continue;
} else if (TransportSettings.DEFAULT_PROFILE.equals(name)) { } else if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
profileSettings = settingsBuilder() profileSettings = settingsBuilder()
@ -345,13 +366,16 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
private ClientBootstrap createClientBootstrap() { private ClientBootstrap createClientBootstrap() {
if (blockingClient) { if (blockingClient) {
clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory(Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)))); clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory(
Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX))));
} else { } else {
int bossCount = NETTY_BOSS_COUNT.get(settings); int bossCount = NETTY_BOSS_COUNT.get(settings);
clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory( clientBootstrap = new ClientBootstrap(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)), Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)),
bossCount, bossCount,
new NioWorkerPool(Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount), new NioWorkerPool(Executors.newCachedThreadPool(
daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount),
new HashedWheelTimer(daemonThreadFactory(settings, "transport_client_timer")))); new HashedWheelTimer(daemonThreadFactory(settings, "transport_client_timer"))));
} }
clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory()); clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory());
@ -403,12 +427,14 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings)); boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings));
fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress); fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress);
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE.get(settings)); ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size",
TCP_SEND_BUFFER_SIZE.get(settings));
if (fallbackTcpSendBufferSize.bytes() >= 0) { if (fallbackTcpSendBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize); fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
} }
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings)); ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size",
TCP_RECEIVE_BUFFER_SIZE.get(settings));
if (fallbackTcpBufferSize.bytes() >= 0) { if (fallbackTcpBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize); fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
} }
@ -485,7 +511,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
return boundSocket.get(); return boundSocket.get();
} }
private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings, List<InetSocketAddress> boundAddresses) { private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings,
List<InetSocketAddress> boundAddresses) {
String[] boundAddressesHostStrings = new String[boundAddresses.size()]; String[] boundAddressesHostStrings = new String[boundAddresses.size()];
TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()];
for (int i = 0; i < boundAddresses.size(); i++) { for (int i = 0; i < boundAddresses.size(); i++) {
@ -531,7 +558,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
// TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address // TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address
// In case of a custom profile, we might use the publish address of the default profile // In case of a custom profile, we might use the publish address of the default profile
publishPort = boundAddresses.get(0).getPort(); publishPort = boundAddresses.get(0).getPort();
logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort); logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], "
+ "falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort);
} }
final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
@ -549,8 +577,13 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings); ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings);
ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings); ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings);
logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", if (logger.isDebugEnabled()) {
name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, receivePredictorMax); logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], "
+ "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery,
connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin,
receivePredictorMax);
}
final ThreadFactory bossFactory = daemonThreadFactory(this.settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, name); final ThreadFactory bossFactory = daemonThreadFactory(this.settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, name);
final ThreadFactory workerFactory = daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, name); final ThreadFactory workerFactory = daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, name);
@ -739,7 +772,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
return; return;
} }
if (isCloseConnectionException(e.getCause())) { if (isCloseConnectionException(e.getCause())) {
logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(),
ctx.getChannel());
// close the channel, which will cause a node to be disconnected if relevant // close the channel, which will cause a node to be disconnected if relevant
ctx.getChannel().close(); ctx.getChannel().close();
disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
@ -754,7 +788,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
ctx.getChannel().close(); ctx.getChannel().close();
disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
} else if (e.getCause() instanceof CancelledKeyException) { } else if (e.getCause() instanceof CancelledKeyException) {
logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(),
ctx.getChannel());
// close the channel as safe measure, which will cause a node to be disconnected if relevant // close the channel as safe measure, which will cause a node to be disconnected if relevant
ctx.getChannel().close(); ctx.getChannel().close();
disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
@ -800,7 +835,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
} }
@Override @Override
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request,
TransportRequestOptions options) throws IOException, TransportException {
Channel targetChannel = nodeChannel(node, options); Channel targetChannel = nodeChannel(node, options);
@ -902,7 +938,9 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
if (light) { if (light) {
nodeChannels = connectToChannelsLight(node); nodeChannels = connectToChannelsLight(node);
} else { } else {
nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk], new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState], new Channel[connectionsPerNodePing]); nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk],
new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState],
new Channel[connectionsPerNodePing]);
try { try {
connectToChannels(nodeChannels, node); connectToChannels(nodeChannels, node);
} catch (Throwable e) { } catch (Throwable e) {
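
On the publish-port fallback above: createBoundTransportAddress first tries to find a bound address whose host matches the publish address and reuse its port, and only then falls back (with a warning) to the port of the first bound address. A simplified sketch of that resolution, ignoring the explicit publish_port setting:

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.List;

class PublishPortResolver {
    // Prefer the port of a bound address that matches the publish address;
    // otherwise fall back to the first bound address' port.
    static int resolvePublishPort(InetAddress publishInetAddress, List<InetSocketAddress> boundAddresses) {
        for (InetSocketAddress bound : boundAddresses) {
            if (bound.getAddress().equals(publishInetAddress)) {
                return bound.getPort();
            }
        }
        return boundAddresses.get(0).getPort();
    }
}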

View File

@ -53,7 +53,8 @@ public class NettyTransportChannel implements TransportChannel {
private final long requestId; private final long requestId;
private final String profileName; private final String profileName;
public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel, long requestId, Version version, String profileName) { public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel,
long requestId, Version version, String profileName) {
this.transportServiceAdapter = transportServiceAdapter; this.transportServiceAdapter = transportServiceAdapter;
this.version = version; this.version = version;
this.transport = transport; this.transport = transport;
@ -119,7 +120,8 @@ public class NettyTransportChannel implements TransportChannel {
public void sendResponse(Throwable error) throws IOException { public void sendResponse(Throwable error) throws IOException {
BytesStreamOutput stream = new BytesStreamOutput(); BytesStreamOutput stream = new BytesStreamOutput();
stream.skip(NettyHeader.HEADER_SIZE); stream.skip(NettyHeader.HEADER_SIZE);
RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), action, error); RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()),
action, error);
stream.writeThrowable(tx); stream.writeThrowable(tx);
byte status = 0; byte status = 0;
status = TransportStatus.setResponse(status); status = TransportStatus.setResponse(status);

View File

@ -80,8 +80,8 @@ public class SizeHeaderFrameDecoder extends FrameDecoder {
} }
// safety against too large frames being sent // safety against too large frames being sent
if (dataLen > NINETY_PER_HEAP_SIZE) { if (dataLen > NINETY_PER_HEAP_SIZE) {
throw new TooLongFrameException( throw new TooLongFrameException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded ["
"transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]");
} }
if (buffer.readableBytes() < dataLen + 6) { if (buffer.readableBytes() < dataLen + 6) {
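
The guard above rejects any frame whose declared length exceeds NINETY_PER_HEAP_SIZE before allocating for it; a corrupt or malicious length field would otherwise trigger an enormous allocation. A sketch of the same guard, assuming the constant is derived as 90% of the JVM heap (the name suggests it, but its definition is outside this hunk) and a plain IllegalArgumentException in place of Netty's TooLongFrameException:

class FrameSizeGuard {
    // Assumed derivation: 90% of the configured max heap.
    static final long NINETY_PER_HEAP_SIZE = (long) (Runtime.getRuntime().maxMemory() * 0.9);

    static void check(long dataLen) {
        if (dataLen > NINETY_PER_HEAP_SIZE) {
            throw new IllegalArgumentException("transport content length received [" + dataLen
                    + "] exceeded [" + NINETY_PER_HEAP_SIZE + "]");
        }
    }
}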

View File

@ -20,6 +20,7 @@
package org.elasticsearch.tribe; package org.elasticsearch.tribe;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterChangedEvent;
@ -83,8 +84,10 @@ import static java.util.Collections.unmodifiableMap;
*/ */
public class TribeService extends AbstractLifecycleComponent<TribeService> { public class TribeService extends AbstractLifecycleComponent<TribeService> {
public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false,
public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE)); RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE));
public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false,
RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE));
public static Settings processSettings(Settings settings) { public static Settings processSettings(Settings settings) {
if (TRIBE_NAME_SETTING.exists(settings)) { if (TRIBE_NAME_SETTING.exists(settings)) {
@ -106,7 +109,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
Settings.Builder sb = Settings.builder().put(settings); Settings.Builder sb = Settings.builder().put(settings);
sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); // this node should just act as a node client sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); // this node should just act as a node client
sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery
sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); // nothing is going to be discovered, since no master will be elected // nothing is going to be discovered, since no master will be elected
sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
if (sb.get("cluster.name") == null) { if (sb.get("cluster.name") == null) {
sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM
} }
@ -114,7 +118,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
return sb.build(); return sb.build();
} }
private static final Setting<String> TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER); // internal settings only // internal settings only
private static final Setting<String> TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER);
private final ClusterService clusterService; private final ClusterService clusterService;
private final String[] blockIndicesWrite; private final String[] blockIndicesWrite;
private final String[] blockIndicesRead; private final String[] blockIndicesRead;
@ -125,14 +130,20 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (ON_CONFLICT_ANY.equals(s) || ON_CONFLICT_DROP.equals(s) || s.startsWith(ON_CONFLICT_PREFER)) { if (ON_CONFLICT_ANY.equals(s) || ON_CONFLICT_DROP.equals(s) || s.startsWith(ON_CONFLICT_PREFER)) {
return s; return s;
} }
throw new IllegalArgumentException("Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " +s); throw new IllegalArgumentException(
"Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " + s);
}, false, Setting.Scope.CLUSTER); }, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false, Setting.Scope.CLUSTER); public static final Setting<Boolean> BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false,
public static final Setting<Boolean> BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false, Setting.Scope.CLUSTER); Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); public static final Setting<Boolean> BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false,
public static final Setting<List<String>> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); public static final Setting<List<String>> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices",
Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices",
Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices",
Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
private final String onConflict; private final String onConflict;
private final Set<String> droppedIndices = ConcurrentCollections.newConcurrentSet(); private final Set<String> droppedIndices = ConcurrentCollections.newConcurrentSet();
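
The on_conflict validator at the top of this hunk admits exactly three shapes of value: any, drop, or anything starting with prefer_ (naming the tribe whose index wins). Restated as a standalone check, with the literal values taken from the error message:

class OnConflictValidation {
    static String validate(String s) {
        if ("any".equals(s) || "drop".equals(s) || s.startsWith("prefer_")) {
            return s;
        }
        throw new IllegalArgumentException(
                "Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " + s);
    }
}
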
@ -304,7 +315,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
tribeAttr.put(attr.key, attr.value); tribeAttr.put(attr.key, attr.value);
} }
tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName); tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName);
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(),
tribe.address(), unmodifiableMap(tribeAttr), tribe.version());
clusterStateChanged = true; clusterStateChanged = true;
logger.info("[{}] adding node [{}]", tribeName, discoNode); logger.info("[{}] adding node [{}]", tribeName, discoNode);
nodes.put(discoNode); nodes.put(discoNode);
@ -328,7 +340,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
// always make sure to update the metadata and routing table, in case
// there are changes in them (new mapping, shards moving from initializing to started)
routingTable.add(tribeState.routingTable().index(index.getIndex()));
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings())
.put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
}
}
@ -357,7 +370,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
} else if (ON_CONFLICT_DROP.equals(onConflict)) {
// drop the indices, there is a conflict
clusterStateChanged = true;
logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, tribeIndex.getIndex(),
existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
droppedIndices.add(tribeIndex.getIndex().getName());
} else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
@ -366,7 +380,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (tribeName.equals(preferredTribeName)) {
// the new one is the preferred one, replace...
clusterStateChanged = true;
logger.info("[{}] adding index {}, preferred over [{}]", tribeName, tribeIndex.getIndex(),
existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
} // else: either the existing one is the preferred one, or we haven't seen one, carry on
@ -378,17 +393,20 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (!clusterStateChanged) {
return currentState;
} else {
return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData)
.routingTable(routingTable.build()).build();
}
}

private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable,
IndexMetaData index) {
metaData.remove(index.getIndex().getName());
routingTable.remove(index.getIndex().getName());
blocks.removeIndexBlocks(index.getIndex().getName());
}

private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData,
RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));

View File

@ -83,7 +83,8 @@ public class ResourceWatcherService extends AbstractLifecycleComponent<ResourceW
TimeValue interval = settings.getAsTime("resource.reload.interval.low", Frequency.LOW.interval);
lowMonitor = new ResourceMonitor(interval, Frequency.LOW);
interval = settings.getAsTime("resource.reload.interval.medium",
settings.getAsTime("resource.reload.interval", Frequency.MEDIUM.interval));
mediumMonitor = new ResourceMonitor(interval, Frequency.MEDIUM);
interval = settings.getAsTime("resource.reload.interval.high", Frequency.HIGH.interval);
highMonitor = new ResourceMonitor(interval, Frequency.HIGH);

View File

@ -13,16 +13,11 @@ DESCRIPTION
Officially supported or commercial plugins require just the plugin name:

    plugin install analysis-icu
    plugin install x-pack

Plugins from Maven Central require 'groupId:artifactId:version':

    plugin install org.elasticsearch:mapper-attachments:3.0.0

Plugins can be installed from a custom URL or file location as follows:
@ -42,7 +37,6 @@ OFFICIAL PLUGINS
- discovery-azure
- discovery-ec2
- discovery-gce
- ingest-geoip
- lang-javascript
- lang-painless
@ -58,8 +52,6 @@ OFFICIAL PLUGINS
OPTIONS

-v,--verbose Verbose output
-h,--help Shows this message

View File

@ -24,6 +24,7 @@ import org.elasticsearch.action.TimestampParsingException;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.AbstractClientHeadersTestCase;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -591,7 +592,14 @@ public class ExceptionSerializationTests extends ESTestCase {
assertEquals("foo", e.getHeader("foo").get(0)); assertEquals("foo", e.getHeader("foo").get(0));
assertEquals("bar", e.getHeader("foo").get(1)); assertEquals("bar", e.getHeader("foo").get(1));
assertSame(status, e.status()); assertSame(status, e.status());
}
public void testNoLongerPrimaryShardException() throws IOException {
ShardId shardId = new ShardId(new Index(randomAsciiOfLength(4), randomAsciiOfLength(4)), randomIntBetween(0, Integer.MAX_VALUE));
String msg = randomAsciiOfLength(4);
ShardStateAction.NoLongerPrimaryShardException ex = serialize(new ShardStateAction.NoLongerPrimaryShardException(shardId, msg));
assertEquals(shardId, ex.getShardId());
assertEquals(msg, ex.getMessage());
}

public static class UnknownHeaderException extends ElasticsearchException {
@ -776,6 +784,7 @@ public class ExceptionSerializationTests extends ESTestCase {
ids.put(139, null);
ids.put(140, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class);
ids.put(141, org.elasticsearch.index.query.QueryShardException.class);
ids.put(142, ShardStateAction.NoLongerPrimaryShardException.class);

Map<Class<? extends ElasticsearchException>, Integer> reverse = new HashMap<>();
for (Map.Entry<Integer, Class<? extends ElasticsearchException>> entry : ids.entrySet()) {

View File

@ -21,13 +21,11 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.hamcrest.CoreMatchers.equalTo;

public class WritePipelineResponseTests extends ESTestCase {
@ -45,17 +43,13 @@ public class WritePipelineResponseTests extends ESTestCase {
}

public void testSerializationWithError() throws IOException {
WritePipelineResponse response = new WritePipelineResponse();
BytesStreamOutput out = new BytesStreamOutput();
response.writeTo(out);
StreamInput streamInput = StreamInput.wrap(out.bytes());
WritePipelineResponse otherResponse = new WritePipelineResponse();
otherResponse.readFrom(streamInput);
assertThat(otherResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
}
}

View File

@ -57,11 +57,11 @@ public class ClusterStateCreationUtils {
* Creates cluster state with an index that has one shard and #(replicaStates) replicas
*
* @param index name of the index
* @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
* @param primaryState state of primary
* @param replicaStates states of the replicas. length of this array determines also the number of replicas
*/
public static ClusterState state(String index, boolean activePrimaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) {
final int numberOfReplicas = replicaStates.length;
int numberOfNodes = numberOfReplicas + 1;
@ -97,7 +97,7 @@ public class ClusterStateCreationUtils {
String relocatingNode = null;
UnassignedInfo unassignedInfo = null;
if (primaryState != ShardRoutingState.UNASSIGNED) {
if (activePrimaryLocal) {
primaryNode = newNode(0).id();
unassignedNodes.remove(primaryNode);
} else {
@ -174,12 +174,12 @@ public class ClusterStateCreationUtils {
* Primary will be STARTED in cluster state but replicas will be one of UNASSIGNED, INITIALIZING, STARTED or RELOCATING.
*
* @param index name of the index
* @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
* @param numberOfReplicas number of replicas
*/
public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int numberOfReplicas) {
int assignedReplicas = randomIntBetween(0, numberOfReplicas);
return stateWithActivePrimary(index, activePrimaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
}

/**
@ -188,11 +188,11 @@ public class ClusterStateCreationUtils {
* some (assignedReplicas) will be one of INITIALIZING, STARTED or RELOCATING.
*
* @param index name of the index
* @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
* @param assignedReplicas number of replicas that should have INITIALIZING, STARTED or RELOCATING state
* @param unassignedReplicas number of replicas that should be unassigned
*/
public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int assignedReplicas, int unassignedReplicas) {
ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
// no point in randomizing - node assignment later on does it too.
for (int i = 0; i < assignedReplicas; i++) {
@ -201,7 +201,7 @@ public class ClusterStateCreationUtils {
for (int i = assignedReplicas; i < replicaStates.length; i++) {
replicaStates[i] = ShardRoutingState.UNASSIGNED;
}
return state(index, activePrimaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates);
}

/**

View File

@ -37,10 +37,13 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -52,6 +55,7 @@ import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.transport.CapturingTransport;
@ -66,6 +70,7 @@ import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -75,9 +80,10 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.empty;
@ -203,6 +209,56 @@ public class TransportReplicationActionTests extends ESTestCase {
assertIndexShardCounter(1);
}
/**
* When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from
* the relocation source to the relocation target. If relocation source receives and processes this cluster state
* before the relocation target, there is a time span where relocation source believes active primary to be on
* relocation target and relocation target believes active primary to be on relocation source. This results in replication
* requests being sent back and forth.
*
* This test checks that a replication request is not routed back from the relocation target to the relocation source
* in case of a stale index routing table on the relocation target.
*/
public void testNoRerouteOnStaleClusterState() throws InterruptedException, ExecutionException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = state(index, true, ShardRoutingState.RELOCATING);
String relocationTargetNode = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId();
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build();
clusterService.setState(state);
logger.debug("--> relocation ongoing state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1);
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase.run();
assertListenerThrows("cluster state too old didn't cause a timeout", listener, UnavailableShardsException.class);
request = new Request(shardId).routedBasedOnClusterVersion(clusterService.state().version() + 1);
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase.run();
assertFalse("cluster state too old didn't cause a retry", listener.isDone());
// finish relocation
ShardRouting relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId).shardsWithState(ShardRoutingState.INITIALIZING).get(0);
AllocationService allocationService = ESAllocationTestCase.createAllocationService();
RoutingAllocation.Result result = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget));
ClusterState updatedState = ClusterState.builder(clusterService.state()).routingResult(result).build();
clusterService.setState(updatedState);
logger.debug("--> relocation complete state:\n{}", clusterService.state().prettyPrint());
IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
final List<CapturingTransport.CapturedRequest> capturedRequests =
transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId);
assertThat(capturedRequests, notNullValue());
assertThat(capturedRequests.size(), equalTo(1));
assertThat(capturedRequests.get(0).action, equalTo("testAction[p]"));
assertIndexShardCounter(1);
}
public void testUnknownIndexOrShardOnReroute() throws InterruptedException {
final String index = "test";
// no replicas in order to skip the replication part
@ -225,7 +281,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test"; final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0); final ShardId shardId = new ShardId(index, "_na_", 0);
clusterService.setState(stateWithStartedPrimary(index, randomBoolean(), 3)); clusterService.setState(stateWithActivePrimary(index, randomBoolean(), 3));
logger.debug("using state: \n{}", clusterService.state().prettyPrint()); logger.debug("using state: \n{}", clusterService.state().prettyPrint());
@ -249,33 +305,73 @@ public class TransportReplicationActionTests extends ESTestCase {
assertIndexShardUninitialized();
}

public void testPrimaryPhaseExecutesOrDelegatesRequestToRelocationTarget() throws InterruptedException, ExecutionException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
clusterService.setState(state);
Request request = new Request(shardId).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
AtomicBoolean movedToReplication = new AtomicBoolean();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) {
@Override
void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) {
super.finishAndMoveToReplication(replicationPhase);
movedToReplication.set(true);
}
};
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
boolean executeOnPrimary = true;
if (primaryShard.relocating() && randomBoolean()) { // whether shard has been marked as relocated already (i.e. relocation completed)
isRelocated.set(true);
indexShardRouting.set(primaryShard);
executeOnPrimary = false;
}
primaryPhase.run();
assertThat(request.processedOnPrimary.get(), equalTo(executeOnPrimary));
assertThat(movedToReplication.get(), equalTo(executeOnPrimary));
if (executeOnPrimary == false) {
final List<CapturingTransport.CapturedRequest> requests = transport.capturedRequestsByTargetNode().get(primaryShard.relocatingNodeId());
assertThat(requests, notNullValue());
assertThat(requests.size(), equalTo(1));
assertThat("primary request was not delegated to relocation target", requests.get(0).action, equalTo("testAction[p]"));
}
}
public void testPrimaryPhaseExecutesDelegatedRequestOnRelocationTarget() throws InterruptedException, ExecutionException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = state(index, true, ShardRoutingState.RELOCATING);
String primaryTargetNodeId = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId();
// simulate execution of the primary phase on the relocation target node
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryTargetNodeId)).build();
clusterService.setState(state);
Request request = new Request(shardId).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
AtomicBoolean movedToReplication = new AtomicBoolean();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) {
@Override
void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) {
super.finishAndMoveToReplication(replicationPhase);
movedToReplication.set(true);
}
};
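// the relocation target should execute the request locally and then move on to the replication phase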
primaryPhase.run();
assertThat("request was not processed on primary relocation target", request.processedOnPrimary.get(), equalTo(true));
assertThat(movedToReplication.get(), equalTo(true));
} }
public void testAddedReplicaAfterPrimaryOperation() {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
// start with no replicas
clusterService.setState(stateWithActivePrimary(index, true, 0));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
final ClusterState stateWithAddedReplicas = state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED);
final Action actionWithAddedReplicaAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
// add replicas after primary operation
((TestClusterService) clusterService).setState(stateWithAddedReplicas);
@ -308,7 +404,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final Action actionWithRelocatingReplicasAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
// set replica to relocating
((TestClusterService) clusterService).setState(stateWithRelocatingReplica);
@ -341,7 +437,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final Action actionWithDeletedIndexAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
// delete index after primary op
((TestClusterService) clusterService).setState(stateWithDeletedIndex);
@ -432,7 +528,13 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test"; final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0); final ShardId shardId = new ShardId(index, "_na_", 0);
clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
}
clusterService.setState(state);
final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
int assignedReplicas = 0;
@ -448,19 +550,26 @@ public class TransportReplicationActionTests extends ESTestCase {
}
}
runReplicateTest(state, shardRoutingTable, assignedReplicas, totalShards);
}

public void testReplicationWithShadowIndex() throws ExecutionException, InterruptedException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
MetaData.Builder metaData = MetaData.builder(state.metaData());
Settings.Builder settings = Settings.builder().put(metaData.get(index).getSettings());
settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true);
metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings));
state = ClusterState.builder(state).metaData(metaData).build();
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the primary phase on the relocation target node
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
}
clusterService.setState(state);
final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
int assignedReplicas = 0;
@ -472,18 +581,22 @@ public class TransportReplicationActionTests extends ESTestCase {
totalShards++;
}
}
runReplicateTest(state, shardRoutingTable, assignedReplicas, totalShards);
}

protected void runReplicateTest(ClusterState state, IndexShardRoutingTable shardRoutingTable, int assignedReplicas, int totalShards) throws InterruptedException, ExecutionException {
final ShardIterator shardIt = shardRoutingTable.shardsIt();
final ShardId shardId = shardIt.shardId();
final Request request = new Request(shardId);
final PlainActionFuture<Response> listener = new PlainActionFuture<>();
logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint());

TransportReplicationAction.IndexShardReference reference = getOrCreateIndexShardOperationsCounter();
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
indexShardRouting.set(primaryShard);

assertIndexShardCounter(2);
// TODO: set a default timeout
TransportReplicationAction<Request, Request, Response>.ReplicationPhase replicationPhase =
@ -507,8 +620,9 @@ public class TransportReplicationActionTests extends ESTestCase {
assertEquals(request.shardId, replicationRequest.shardId);
}

String localNodeId = clusterService.state().getNodes().localNodeId();
// no request was sent to the local node
assertThat(nodesSentTo.keySet(), not(hasItem(localNodeId)));

// requests were sent to the correct shard copies
for (ShardRouting shard : clusterService.state().getRoutingTable().shardRoutingTable(shardId)) {
@ -518,11 +632,11 @@ public class TransportReplicationActionTests extends ESTestCase {
if (shard.unassigned()) {
continue;
}
if (localNodeId.equals(shard.currentNodeId()) == false) {
assertThat(nodesSentTo.remove(shard.currentNodeId()), notNullValue());
}
if (shard.relocating() && localNodeId.equals(shard.relocatingNodeId()) == false) { // for relocating primaries, we replicate from target to source if source is marked as relocated
assertThat(nodesSentTo.remove(shard.relocatingNodeId()), notNullValue());
}
}
@ -629,6 +743,7 @@ public class TransportReplicationActionTests extends ESTestCase {
// shard operation should be ongoing, so the counter is at 2
// we have to wait here because increment happens in thread
assertBusy(() -> assertIndexShardCounter(2));

assertThat(transport.capturedRequests().length, equalTo(0));
((ActionWithDelay) action).countDownLatch.countDown();
t.join();
@ -644,6 +759,8 @@ public class TransportReplicationActionTests extends ESTestCase {
// one replica to make sure replication is attempted
clusterService.setState(state(index, true,
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard();
indexShardRouting.set(primaryShard);
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("100ms"); Request request = new Request(shardId).timeout("100ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>(); PlainActionFuture<Response> listener = new PlainActionFuture<>();
@ -726,12 +843,28 @@ public class TransportReplicationActionTests extends ESTestCase {
private final AtomicInteger count = new AtomicInteger(0);
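// set by individual tests to control what the stubbed IndexShardReference below reports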
private final AtomicBoolean isRelocated = new AtomicBoolean(false);
private final AtomicReference<ShardRouting> indexShardRouting = new AtomicReference<>();
/*
* Returns testIndexShardOperationsCounter or initializes it if it was not already created in this test run.
* */
private synchronized TransportReplicationAction.IndexShardReference getOrCreateIndexShardOperationsCounter() {
count.incrementAndGet();
return new TransportReplicationAction.IndexShardReference() {
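// test stub: tracks open references via the counter and reports relocation state and routing
// from the test-controlled atomics above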
@Override
public boolean isRelocated() {
return isRelocated.get();
}
@Override
public ShardRouting routingEntry() {
ShardRouting shardRouting = indexShardRouting.get();
assert shardRouting != null;
return shardRouting;
}
@Override
public void close() {
count.decrementAndGet();
@ -783,7 +916,7 @@ public class TransportReplicationActionTests extends ESTestCase {
}

@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true);
assert executedBefore == false : "request has already been executed on the primary";
return new Tuple<>(new Response(), shardRequest);
@ -805,7 +938,11 @@ public class TransportReplicationActionTests extends ESTestCase {
}

@Override
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
return getOrCreateIndexShardOperationsCounter();
}

protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
return getOrCreateIndexShardOperationsCounter();
}
}
@ -832,7 +969,7 @@ public class TransportReplicationActionTests extends ESTestCase {
}

@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) {
return throwException(shardRequest.shardId());
}
@ -870,7 +1007,7 @@ public class TransportReplicationActionTests extends ESTestCase {
}

@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
awaitLatch();
return new Tuple<>(new Response(), shardRequest);
}

View File

@ -279,6 +279,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/16373")
public void testOldIndexes() throws Exception {
setupCluster();

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.action.shard;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
@ -28,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardIterator;
@ -38,6 +40,9 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before;
@ -45,12 +50,15 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.not;

public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase {
@ -119,9 +127,25 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa
tasks.addAll(failingTasks);
tasks.addAll(nonExistentTasks);
ClusterStateTaskExecutor.BatchResult<ShardStateAction.ShardRoutingEntry> result = failingExecutor.execute(currentState, tasks);
Map<ShardStateAction.ShardRoutingEntry, ClusterStateTaskExecutor.TaskResult> taskResultMap =
failingTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.failure(new RuntimeException("simulated applyFailedShards failure"))));
taskResultMap.putAll(nonExistentTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success())));
assertTaskResults(taskResultMap, result, currentState, false);
}
public void testIllegalShardFailureRequests() throws Exception {
String reason = "test illegal shard failure requests";
ClusterState currentState = createClusterStateWithStartedShards(reason);
List<ShardStateAction.ShardRoutingEntry> failingTasks = createExistingShards(currentState, reason);
List<ShardStateAction.ShardRoutingEntry> tasks = new ArrayList<>();
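// pair each failing task with a source shard that is neither the local allocation nor the primary allocation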
for (ShardStateAction.ShardRoutingEntry failingTask : failingTasks) {
tasks.add(new ShardStateAction.ShardRoutingEntry(failingTask.getShardRouting(), randomInvalidSourceShard(currentState, failingTask.getShardRouting()), failingTask.message, failingTask.failure));
}
Map<ShardStateAction.ShardRoutingEntry, ClusterStateTaskExecutor.TaskResult> taskResultMap =
tasks.stream().collect(Collectors.toMap(
Function.identity(),
task -> ClusterStateTaskExecutor.TaskResult.failure(new ShardStateAction.NoLongerPrimaryShardException(task.getShardRouting().shardId(), "source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation"))));
ClusterStateTaskExecutor.BatchResult<ShardStateAction.ShardRoutingEntry> result = executor.execute(currentState, tasks);
assertTaskResults(taskResultMap, result, currentState, false);
}
@ -156,17 +180,22 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa
for (int i = 0; i < numberOfTasks; i++) {
shardsToFail.add(randomFrom(failures));
}
return toTasks(currentState, shardsToFail, indexUUID, reason);
}

private List<ShardStateAction.ShardRoutingEntry> createNonExistentShards(ClusterState currentState, String reason) {
// add shards from a non-existent index
String nonExistentIndexUUID = "non-existent";
Index index = new Index("non-existent", nonExistentIndexUUID);
List<String> nodeIds = new ArrayList<>();
for (ObjectCursor<String> nodeId : currentState.nodes().getNodes().keys()) {
nodeIds.add(nodeId.toString());
}
List<ShardRouting> nonExistentShards = new ArrayList<>();
nonExistentShards.add(nonExistentShardRouting(index, nodeIds, true));
for (int i = 0; i < numberOfReplicas; i++) {
nonExistentShards.add(nonExistentShardRouting(index, nodeIds, false));
}
List<ShardStateAction.ShardRoutingEntry> existingShards = createExistingShards(currentState, reason);
List<ShardStateAction.ShardRoutingEntry> shardsWithMismatchedAllocationIds = new ArrayList<>();
@ -174,28 +203,32 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa
ShardRouting sr = existingShard.getShardRouting();
ShardRouting nonExistentShardRouting =
TestShardRouting.newShardRouting(sr.index(), sr.id(), sr.currentNodeId(), sr.relocatingNodeId(), sr.restoreSource(), sr.primary(), sr.state(), sr.version());
shardsWithMismatchedAllocationIds.add(new ShardStateAction.ShardRoutingEntry(nonExistentShardRouting, nonExistentShardRouting, existingShard.message, existingShard.failure));
}

List<ShardStateAction.ShardRoutingEntry> tasks = new ArrayList<>();
nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.ShardRoutingEntry(shard, shard, reason, new CorruptIndexException("simulated", nonExistentIndexUUID))));
tasks.addAll(shardsWithMismatchedAllocationIds);
return tasks;
}
private ShardRouting nonExistentShardRouting(Index index, List<String> nodeIds, boolean primary) {
return TestShardRouting.newShardRouting(index, 0, randomFrom(nodeIds), primary, randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.RELOCATING, ShardRoutingState.STARTED), randomIntBetween(1, 8));
}
private static void assertTasksSuccessful(
List<ShardStateAction.ShardRoutingEntry> tasks,
ClusterStateTaskExecutor.BatchResult<ShardStateAction.ShardRoutingEntry> result,
ClusterState clusterState,
boolean clusterStateChanged
) {
Map<ShardStateAction.ShardRoutingEntry, ClusterStateTaskExecutor.TaskResult> taskResultMap =
tasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success()));
assertTaskResults(taskResultMap, result, clusterState, clusterStateChanged);
}

private static void assertTaskResults(
Map<ShardStateAction.ShardRoutingEntry, ClusterStateTaskExecutor.TaskResult> taskResultMap,
ClusterStateTaskExecutor.BatchResult<ShardStateAction.ShardRoutingEntry> result,
ClusterState clusterState,
boolean clusterStateChanged
@ -203,24 +236,29 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa
// there should be as many task results as tasks
assertEquals(taskResultMap.size(), result.executionResults.size());

for (Map.Entry<ShardStateAction.ShardRoutingEntry, ClusterStateTaskExecutor.TaskResult> entry : taskResultMap.entrySet()) {
// every task should have a corresponding task result
assertTrue(result.executionResults.containsKey(entry.getKey()));

// the task results are as expected
assertEquals(entry.getValue().isSuccess(), result.executionResults.get(entry.getKey()).isSuccess());
}
// every shard that we requested to be successfully failed is
// gone
List<ShardRouting> shards = clusterState.getRoutingTable().allShards();
for (Map.Entry<ShardStateAction.ShardRoutingEntry, ClusterStateTaskExecutor.TaskResult> entry : taskResultMap.entrySet()) {
if (entry.getValue().isSuccess()) {
// the shard was successfully failed and so should not
// be in the routing table
for (ShardRouting shard : shards) { for (ShardRouting shard : shards) {
if (entry.getKey().getShardRouting().allocationId() != null) { if (entry.getKey().getShardRouting().allocationId() != null) {
assertThat(shard.allocationId(), not(equalTo(entry.getKey().getShardRouting().allocationId()))); assertThat(shard.allocationId(), not(equalTo(entry.getKey().getShardRouting().allocationId())));
} }
} }
} else {
// check we saw the expected failure
ClusterStateTaskExecutor.TaskResult actualResult = result.executionResults.get(entry.getKey());
assertThat(actualResult.getFailure(), instanceOf(entry.getValue().getFailure().getClass()));
assertThat(actualResult.getFailure().getMessage(), equalTo(entry.getValue().getFailure().getMessage()));
} }
} }
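The switch from Boolean to TaskResult lets an expectation carry the anticipated failure, not just a success flag. As a hedged illustration only (the names validTask, staleTask, shardId, and the failure(Throwable) factory are assumptions inferred from the getFailure()/isSuccess() accessors asserted above, not shown in this diff), a mixed expectation map could look like:

    Map<ShardStateAction.ShardRoutingEntry, ClusterStateTaskExecutor.TaskResult> expected = new HashMap<>();
    // a task sent from a valid source shard is expected to succeed
    expected.put(validTask, ClusterStateTaskExecutor.TaskResult.success());
    // a task sent from a stale source shard is expected to fail with this exact exception
    expected.put(staleTask, ClusterStateTaskExecutor.TaskResult.failure(
        new ShardStateAction.NoLongerPrimaryShardException(shardId, "stale source shard")));
    // result is the BatchResult produced by executing the tasks against clusterState
    assertTaskResults(expected, result, clusterState, true);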
@@ -231,11 +269,49 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase {
        }
    }

-    private static List<ShardStateAction.ShardRoutingEntry> toTasks(List<ShardRouting> shards, String indexUUID, String message) {
+    private static List<ShardStateAction.ShardRoutingEntry> toTasks(ClusterState currentState, List<ShardRouting> shards, String indexUUID, String message) {
        return shards
            .stream()
-            .map(shard -> new ShardStateAction.ShardRoutingEntry(shard, indexUUID, message, new CorruptIndexException("simulated", indexUUID)))
+            .map(shard -> new ShardStateAction.ShardRoutingEntry(shard, randomValidSourceShard(currentState, shard), message, new CorruptIndexException("simulated", indexUUID)))
            .collect(Collectors.toList());
    }
+    private static ShardRouting randomValidSourceShard(ClusterState currentState, ShardRouting shardRouting) {
+        // for the request node ID to be valid, either the request is
+        // from the node the shard is assigned to, or the request is
+        // from the node holding the primary shard
+        if (randomBoolean()) {
+            // request from local node
+            return shardRouting;
+        } else {
+            // request from primary node unless in the case of
+            // non-existent shards there is not one and we fallback to
+            // the local node
+            ShardRouting primaryNodeId = primaryShard(currentState, shardRouting);
+            return primaryNodeId != null ? primaryNodeId : shardRouting;
+        }
+    }
+
+    private static ShardRouting randomInvalidSourceShard(ClusterState currentState, ShardRouting shardRouting) {
+        ShardRouting primaryShard = primaryShard(currentState, shardRouting);
+        Set<ShardRouting> shards =
+            currentState
+                .routingTable()
+                .allShards()
+                .stream()
+                .filter(shard -> !shard.isSameAllocation(shardRouting))
+                .filter(shard -> !shard.isSameAllocation(primaryShard))
+                .collect(Collectors.toSet());
+        if (!shards.isEmpty()) {
+            return randomSubsetOf(1, shards.toArray(new ShardRouting[0])).get(0);
+        } else {
+            return
+                TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), DiscoveryService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values()), shardRouting.version());
+        }
+    }
+
+    private static ShardRouting primaryShard(ClusterState currentState, ShardRouting shardRouting) {
+        IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(shardRouting.shardId());
+        return indexShard == null ? null : indexShard.primaryShard();
+    }
}
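These helpers encode the validity rule for shard-failure requests: the source must be either the failed shard's own allocation or the allocation currently holding the primary. A minimal sketch of that rule, assuming a hypothetical isValidSource helper (the production check lives in ShardStateAction and is not part of this test file):

    // hypothetical: mirrors what randomValidSourceShard/randomInvalidSourceShard exercise
    private static boolean isValidSource(ShardRouting failedShard, ShardRouting source, ShardRouting primary) {
        // valid if the request comes from the shard's own allocation...
        return source.isSameAllocation(failedShard)
            // ...or from the allocation that currently holds the primary
            || (primary != null && source.isSameAllocation(primary));
    }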
@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.action.shard;

import org.apache.lucene.index.CorruptIndexException;
+import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
@@ -29,11 +30,12 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.transport.CapturingTransport;
@@ -55,7 +57,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.LongConsumer;

-import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.is;
@@ -127,15 +128,13 @@ public class ShardStateActionTests extends ESTestCase {
    public void testSuccess() throws InterruptedException {
        final String index = "test";

-        clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
-        String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();
+        clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));

        AtomicBoolean success = new AtomicBoolean();
        CountDownLatch latch = new CountDownLatch(1);

        ShardRouting shardRouting = getRandomShardRouting(index);
-        shardStateAction.shardFailed(shardRouting, indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
+        shardStateAction.shardFailed(shardRouting, shardRouting, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                success.set(true);
@@ -169,21 +168,20 @@ public class ShardStateActionTests extends ESTestCase {
    public void testNoMaster() throws InterruptedException {
        final String index = "test";

-        clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
+        clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));

        DiscoveryNodes.Builder noMasterBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
        noMasterBuilder.masterNodeId(null);
        clusterService.setState(ClusterState.builder(clusterService.state()).nodes(noMasterBuilder));

-        String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();

        CountDownLatch latch = new CountDownLatch(1);
        AtomicInteger retries = new AtomicInteger();
        AtomicBoolean success = new AtomicBoolean();

        setUpMasterRetryVerification(1, retries, latch, requestId -> {});

-        shardStateAction.shardFailed(getRandomShardRouting(index), indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
+        ShardRouting failedShard = getRandomShardRouting(index);
+        shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                success.set(true);
@@ -207,9 +205,7 @@ public class ShardStateActionTests extends ESTestCase {
    public void testMasterChannelException() throws InterruptedException {
        final String index = "test";

-        clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
-        String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();
+        clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));

        CountDownLatch latch = new CountDownLatch(1);
        AtomicInteger retries = new AtomicInteger();
@@ -233,7 +229,8 @@ public class ShardStateActionTests extends ESTestCase {
        final int numberOfRetries = randomIntBetween(1, 256);
        setUpMasterRetryVerification(numberOfRetries, retries, latch, retryLoop);

-        shardStateAction.shardFailed(getRandomShardRouting(index), indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
+        ShardRouting failedShard = getRandomShardRouting(index);
+        shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                success.set(true);
@@ -264,13 +261,12 @@ public class ShardStateActionTests extends ESTestCase {
    public void testUnhandledFailure() {
        final String index = "test";

-        clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
-        String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();
+        clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));

        AtomicBoolean failure = new AtomicBoolean();

-        shardStateAction.shardFailed(getRandomShardRouting(index), indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
+        ShardRouting failedShard = getRandomShardRouting(index);
+        shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                failure.set(false);
@@ -294,9 +290,7 @@ public class ShardStateActionTests extends ESTestCase {
    public void testShardNotFound() throws InterruptedException {
        final String index = "test";

-        clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
-        String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();
+        clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));

        AtomicBoolean success = new AtomicBoolean();
        CountDownLatch latch = new CountDownLatch(1);
@@ -304,7 +298,7 @@ public class ShardStateActionTests extends ESTestCase {
        ShardRouting failedShard = getRandomShardRouting(index);
        RoutingTable routingTable = RoutingTable.builder(clusterService.state().getRoutingTable()).remove(index).build();
        clusterService.setState(ClusterState.builder(clusterService.state()).routingTable(routingTable));
-        shardStateAction.shardFailed(failedShard, indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
+        shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                success.set(true);
@@ -326,6 +320,44 @@ public class ShardStateActionTests extends ESTestCase {
        assertTrue(success.get());
    }

+    public void testNoLongerPrimaryShardException() throws InterruptedException {
+        final String index = "test";
+
+        clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
+
+        ShardRouting failedShard = getRandomShardRouting(index);
+
+        String nodeId = randomFrom(clusterService.state().nodes().nodes().keys().toArray(String.class));
+
+        AtomicReference<Throwable> failure = new AtomicReference<>();
+        CountDownLatch latch = new CountDownLatch(1);
+
+        ShardRouting sourceFailedShard = TestShardRouting.newShardRouting(failedShard.index(), failedShard.id(), nodeId, randomBoolean(), randomFrom(ShardRoutingState.values()), failedShard.version());
+        shardStateAction.shardFailed(failedShard, sourceFailedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
+            @Override
+            public void onSuccess() {
+                failure.set(null);
+                latch.countDown();
+            }
+
+            @Override
+            public void onFailure(Throwable t) {
+                failure.set(t);
+                latch.countDown();
+            }
+        });
+
+        ShardStateAction.NoLongerPrimaryShardException catastrophicError =
+            new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "source shard [" + sourceFailedShard + "] is neither the local allocation nor the primary allocation");
+        CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear();
+        transport.handleRemoteError(capturedRequests[0].requestId, catastrophicError);
+
+        latch.await();
+        assertNotNull(failure.get());
+        assertThat(failure.get(), instanceOf(ShardStateAction.NoLongerPrimaryShardException.class));
+        assertThat(failure.get().getMessage(), equalTo(catastrophicError.getMessage()));
+    }
    private ShardRouting getRandomShardRouting(String index) {
        IndexRoutingTable indexRoutingTable = clusterService.state().routingTable().index(index);
        ShardsIterator shardsIterator = indexRoutingTable.randomAllActiveShardsIt();
@@ -19,9 +19,6 @@
package org.elasticsearch.common.cli;

-import java.nio.file.NoSuchFileException;
-import java.util.List;
-
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
@@ -46,22 +43,9 @@ public class TerminalTests extends CliToolTestCase {
        assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
    }

-    public void testError() throws Exception {
-        try {
-            // actually throw so we have a stacktrace
-            throw new NoSuchFileException("/path/to/some/file");
-        } catch (NoSuchFileException e) {
-            CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
-            terminal.printError(e);
-            List<String> output = terminal.getTerminalOutput();
-            assertFalse(output.isEmpty());
-            assertTrue(output.get(0), output.get(0).contains("NoSuchFileException")); // exception class
-            assertTrue(output.get(0), output.get(0).contains("/path/to/some/file")); // message
-            assertEquals(1, output.size());
-            // TODO: we should test stack trace is printed in debug mode...except debug is a sysprop instead of
-            // a command line param...maybe it should be VERBOSE instead of a separate debug prop?
-        }
+    public void testEscaping() throws Exception {
+        CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
+        assertPrinted(terminal, Terminal.Verbosity.NORMAL, "This message contains percent like %20n");
    }

    private void assertPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) {
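The replacement test feeds a message containing a raw percent through the terminal because java.util.Formatter treats % as the start of a conversion. A small illustrative sketch of the failure mode being guarded against (not part of this diff):

    String message = "This message contains percent like %20n";
    // String.format(message) throws: %20n puts a width on the %n line-separator
    // conversion, which is illegal, so output routed through format() must escape first
    String escaped = message.replace("%", "%%");
    System.out.print(String.format(escaped)); // prints the literal message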
@@ -48,91 +48,6 @@ public class FileSystemUtilsTests extends ESTestCase {
        dst = createTempDir();
        Files.createDirectories(src);
        Files.createDirectories(dst);
-
-        // We first copy sources test files from src/test/resources
-        // Because after when the test runs, src files are moved to their destination
-        final Path path = getDataPath("/org/elasticsearch/common/io/copyappend");
-        FileSystemUtils.copyDirectoryRecursively(path, src);
-    }
-
-    public void testMoveOverExistingFileAndAppend() throws IOException {
-        FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dst, ".new");
-        assertFileContent(dst, "file1.txt", "version1");
-        assertFileContent(dst, "dir/file2.txt", "version1");
-
-        FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dst, ".new");
-        assertFileContent(dst, "file1.txt", "version1");
-        assertFileContent(dst, "dir/file2.txt", "version1");
-        assertFileContent(dst, "file1.txt.new", "version2");
-        assertFileContent(dst, "dir/file2.txt.new", "version2");
-        assertFileContent(dst, "file3.txt", "version1");
-        assertFileContent(dst, "dir/subdir/file4.txt", "version1");
-
-        FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dst, ".new");
-        assertFileContent(dst, "file1.txt", "version1");
-        assertFileContent(dst, "dir/file2.txt", "version1");
-        assertFileContent(dst, "file1.txt.new", "version3");
-        assertFileContent(dst, "dir/file2.txt.new", "version3");
-        assertFileContent(dst, "file3.txt", "version1");
-        assertFileContent(dst, "dir/subdir/file4.txt", "version1");
-        assertFileContent(dst, "file3.txt.new", "version2");
-        assertFileContent(dst, "dir/subdir/file4.txt.new", "version2");
-        assertFileContent(dst, "dir/subdir/file5.txt", "version1");
-    }
-
-    public void testMoveOverExistingFileAndIgnore() throws IOException {
-        Path dest = createTempDir();
-
-        FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dest, null);
-        assertFileContent(dest, "file1.txt", "version1");
-        assertFileContent(dest, "dir/file2.txt", "version1");
-
-        FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dest, null);
-        assertFileContent(dest, "file1.txt", "version1");
-        assertFileContent(dest, "dir/file2.txt", "version1");
-        assertFileContent(dest, "file1.txt.new", null);
-        assertFileContent(dest, "dir/file2.txt.new", null);
-        assertFileContent(dest, "file3.txt", "version1");
-        assertFileContent(dest, "dir/subdir/file4.txt", "version1");
-
-        FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dest, null);
-        assertFileContent(dest, "file1.txt", "version1");
-        assertFileContent(dest, "dir/file2.txt", "version1");
-        assertFileContent(dest, "file1.txt.new", null);
-        assertFileContent(dest, "dir/file2.txt.new", null);
-        assertFileContent(dest, "file3.txt", "version1");
-        assertFileContent(dest, "dir/subdir/file4.txt", "version1");
-        assertFileContent(dest, "file3.txt.new", null);
-        assertFileContent(dest, "dir/subdir/file4.txt.new", null);
-        assertFileContent(dest, "dir/subdir/file5.txt", "version1");
-    }
-
-    public void testMoveFilesDoesNotCreateSameFileWithSuffix() throws Exception {
-        Path[] dirs = new Path[] { createTempDir(), createTempDir(), createTempDir()};
-        for (Path dir : dirs) {
-            Files.write(dir.resolve("file1.txt"), "file1".getBytes(StandardCharsets.UTF_8));
-            Files.createDirectory(dir.resolve("dir"));
-            Files.write(dir.resolve("dir").resolve("file2.txt"), "file2".getBytes(StandardCharsets.UTF_8));
-        }
-
-        FileSystemUtils.moveFilesWithoutOverwriting(dirs[0], dst, ".new");
-        assertFileContent(dst, "file1.txt", "file1");
-        assertFileContent(dst, "dir/file2.txt", "file2");
-
-        // do the same operation again, make sure, no .new files have been added
-        FileSystemUtils.moveFilesWithoutOverwriting(dirs[1], dst, ".new");
-        assertFileContent(dst, "file1.txt", "file1");
-        assertFileContent(dst, "dir/file2.txt", "file2");
-        assertFileNotExists(dst.resolve("file1.txt.new"));
-        assertFileNotExists(dst.resolve("dir").resolve("file2.txt.new"));
-
-        // change file content, make sure it gets updated
-        Files.write(dirs[2].resolve("dir").resolve("file2.txt"), "UPDATED".getBytes(StandardCharsets.UTF_8));
-        FileSystemUtils.moveFilesWithoutOverwriting(dirs[2], dst, ".new");
-        assertFileContent(dst, "file1.txt", "file1");
-        assertFileContent(dst, "dir/file2.txt", "file2");
-        assertFileContent(dst, "dir/file2.txt.new", "UPDATED");
    }

    public void testAppend() {
@@ -37,8 +37,8 @@ import java.util.Arrays;
public class BigArraysTests extends ESSingleNodeTestCase {

-    public static BigArrays randombigArrays() {
-        final PageCacheRecycler recycler = randomBoolean() ? null : ESSingleNodeTestCase.getInstanceFromNode(PageCacheRecycler.class);
+    private BigArrays randombigArrays() {
+        final PageCacheRecycler recycler = randomBoolean() ? null : getInstanceFromNode(PageCacheRecycler.class);
        return new MockBigArrays(recycler, new NoneCircuitBreakerService());
    }
@@ -25,6 +25,8 @@ import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.util.HashMap;
@@ -38,13 +40,18 @@ public class BytesRefHashTests extends ESSingleNodeTestCase {
    BytesRefHash hash;

+    private BigArrays randombigArrays() {
+        final PageCacheRecycler recycler = randomBoolean() ? null : getInstanceFromNode(PageCacheRecycler.class);
+        return new MockBigArrays(recycler, new NoneCircuitBreakerService());
+    }
+
    private void newHash() {
        if (hash != null) {
            hash.close();
        }

        // Test high load factors to make sure that collision resolution works fine
        final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
-        hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randombigArrays());
+        hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, randombigArrays());
    }
    @Override
@@ -22,6 +22,8 @@ package org.elasticsearch.common.util;
import com.carrotsearch.hppc.LongLongHashMap;
import com.carrotsearch.hppc.LongLongMap;
import com.carrotsearch.hppc.cursors.LongLongCursor;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.util.HashMap;
@@ -33,6 +35,11 @@ import java.util.Set;
public class LongHashTests extends ESSingleNodeTestCase {
    LongHash hash;

+    private BigArrays randombigArrays() {
+        final PageCacheRecycler recycler = randomBoolean() ? null : getInstanceFromNode(PageCacheRecycler.class);
+        return new MockBigArrays(recycler, new NoneCircuitBreakerService());
+    }
+
    private void newHash() {
        if (hash != null) {
            hash.close();
@@ -40,7 +47,7 @@ public class LongHashTests extends ESSingleNodeTestCase {
        // Test high load factors to make sure that collision resolution works fine
        final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
-        hash = new LongHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randombigArrays());
+        hash = new LongHash(randomIntBetween(0, 100), maxLoadFactor, randombigArrays());
    }

    @Override
@@ -20,12 +20,20 @@
package org.elasticsearch.common.util;

import com.carrotsearch.hppc.LongObjectHashMap;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESSingleNodeTestCase;

public class LongObjectHashMapTests extends ESSingleNodeTestCase {

+    private BigArrays randombigArrays() {
+        final PageCacheRecycler recycler = randomBoolean() ? null : getInstanceFromNode(PageCacheRecycler.class);
+        return new MockBigArrays(recycler, new NoneCircuitBreakerService());
+    }
+
    public void testDuel() {
        final LongObjectHashMap<Object> map1 = new LongObjectHashMap<>();
-        final LongObjectPagedHashMap<Object> map2 = new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, BigArraysTests.randombigArrays());
+        final LongObjectPagedHashMap<Object> map2 = new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, randombigArrays());
        final int maxKey = randomIntBetween(1, 10000);
        final int iters = scaledRandomIntBetween(10000, 100000);
        for (int i = 0; i < iters; ++i) {
@@ -0,0 +1,115 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
public class SuspendableRefContainerTests extends ESTestCase {
public void testBasicAcquire() throws InterruptedException {
SuspendableRefContainer refContainer = new SuspendableRefContainer();
assertThat(refContainer.activeRefs(), equalTo(0));
Releasable lock1 = randomLockingMethod(refContainer);
assertThat(refContainer.activeRefs(), equalTo(1));
Releasable lock2 = randomLockingMethod(refContainer);
assertThat(refContainer.activeRefs(), equalTo(2));
lock1.close();
assertThat(refContainer.activeRefs(), equalTo(1));
lock1.close(); // check idempotence
assertThat(refContainer.activeRefs(), equalTo(1));
lock2.close();
assertThat(refContainer.activeRefs(), equalTo(0));
}
public void testAcquisitionBlockingBlocksNewAcquisitions() throws InterruptedException {
SuspendableRefContainer refContainer = new SuspendableRefContainer();
assertThat(refContainer.activeRefs(), equalTo(0));
try (Releasable block = refContainer.blockAcquisition()) {
assertThat(refContainer.activeRefs(), equalTo(0));
assertThat(refContainer.tryAcquire(), nullValue());
assertThat(refContainer.activeRefs(), equalTo(0));
}
try (Releasable lock = refContainer.tryAcquire()) {
assertThat(refContainer.activeRefs(), equalTo(1));
}
// same with blocking acquire
AtomicBoolean acquired = new AtomicBoolean();
Thread t = new Thread(() -> {
try (Releasable lock = randomBoolean() ? refContainer.acquire() : refContainer.acquireUninterruptibly()) {
acquired.set(true);
assertThat(refContainer.activeRefs(), equalTo(1));
} catch (InterruptedException e) {
fail("Interrupted");
}
});
try (Releasable block = refContainer.blockAcquisition()) {
assertThat(refContainer.activeRefs(), equalTo(0));
t.start();
// check that blocking acquire really blocks
assertThat(acquired.get(), equalTo(false));
assertThat(refContainer.activeRefs(), equalTo(0));
}
t.join();
assertThat(acquired.get(), equalTo(true));
assertThat(refContainer.activeRefs(), equalTo(0));
}
public void testAcquisitionBlockingWaitsOnExistingAcquisitions() throws InterruptedException {
SuspendableRefContainer refContainer = new SuspendableRefContainer();
AtomicBoolean acquired = new AtomicBoolean();
Thread t = new Thread(() -> {
try (Releasable block = refContainer.blockAcquisition()) {
acquired.set(true);
assertThat(refContainer.activeRefs(), equalTo(0));
}
});
try (Releasable lock = randomLockingMethod(refContainer)) {
assertThat(refContainer.activeRefs(), equalTo(1));
t.start();
assertThat(acquired.get(), equalTo(false));
assertThat(refContainer.activeRefs(), equalTo(1));
}
t.join();
assertThat(acquired.get(), equalTo(true));
assertThat(refContainer.activeRefs(), equalTo(0));
}
private Releasable randomLockingMethod(SuspendableRefContainer refContainer) throws InterruptedException {
switch (randomInt(2)) {
case 0: return refContainer.tryAcquire();
case 1: return refContainer.acquire();
case 2: return refContainer.acquireUninterruptibly();
}
throw new IllegalArgumentException("randomLockingMethod inconsistent");
}
}
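For orientation, a hedged caller-side sketch of the API these tests exercise; only the methods shown above are assumed, and the surrounding comments are illustrative:

    SuspendableRefContainer container = new SuspendableRefContainer();
    // an operation takes a ref unless acquisition is currently blocked
    try (Releasable ref = container.tryAcquire()) {
        if (ref != null) {
            // safe to proceed; an active ref makes blockAcquisition() wait
        }
    }
    // a suspending operation waits for active refs, then shuts out new ones
    try (Releasable block = container.blockAcquisition()) {
        // no refs can be acquired until block is closed
    }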
@@ -56,7 +56,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
-import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.store.IndicesStoreIntegrationIT;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -905,7 +904,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
        ShardRouting failedShard =
            randomFrom(clusterService().state().getRoutingNodes().node(nonMasterNodeId).shardsWithState(ShardRoutingState.STARTED));
        ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonMasterNode);
-        String indexUUID = clusterService().state().metaData().index("test").getIndexUUID();

        CountDownLatch latch = new CountDownLatch(1);
        AtomicBoolean success = new AtomicBoolean();
@@ -913,7 +911,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
        NetworkPartition networkPartition = addRandomIsolation(isolatedNode);
        networkPartition.startDisrupting();

-        service.shardFailed(failedShard, indexUUID, "simulated", new CorruptIndexException("simulated", (String) null), new ShardStateAction.Listener() {
+        service.shardFailed(failedShard, failedShard, "simulated", new CorruptIndexException("simulated", (String) null), new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                success.set(true);
@@ -80,20 +80,6 @@ import static org.hamcrest.Matchers.sameInstance;
@ESIntegTestCase.SuppressLocalMode
@TestLogging("_root:DEBUG")
public class ZenDiscoveryIT extends ESIntegTestCase {

-    public void testChangeRejoinOnMasterOptionIsDynamic() throws Exception {
-        Settings nodeSettings = Settings.settingsBuilder()
-                .put("discovery.type", "zen") // <-- To override the local setting if set externally
-                .build();
-        String nodeName = internalCluster().startNode(nodeSettings);
-        ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName);
-
-        assertThat(zenDiscovery.isRejoinOnMasterGone(), is(true));
-
-        client().admin().cluster().prepareUpdateSettings()
-                .setTransientSettings(Settings.builder().put(ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING.getKey(), false))
-                .get();
-
-        assertThat(zenDiscovery.isRejoinOnMasterGone(), is(false));
-    }
-
    public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception {
        Settings defaultSettings = Settings.builder()
Some files were not shown because too many files have changed in this diff.