diff --git a/.projectile b/.projectile
index d2a5e762a88..49e2b292c26 100644
--- a/.projectile
+++ b/.projectile
@@ -16,7 +16,6 @@
 -/plugins/discovery-azure/target
 -/plugins/discovery-ec2/target
 -/plugins/discovery-gce/target
--/plugins/discovery-multicast/target
 -/plugins/jvm-example/target
 -/plugins/lang-expression/target
 -/plugins/lang-groovy/target
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index d4d4d08c393..1cdeec762d2 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -94,6 +94,9 @@ class PrecommitTasks {
         project.checkstyle {
             config = project.resources.text.fromFile(
                     PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
+            configProperties = [
+                suppressions: PrecommitTasks.getResource('/checkstyle_suppressions.xml')
+            ]
         }
         for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
             Task task = project.tasks.findByName(taskName)
diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml
index b44c649a52b..4dd0534fa01 100644
--- a/buildSrc/src/main/resources/checkstyle.xml
+++ b/buildSrc/src/main/resources/checkstyle.xml
@@ -6,6 +6,10 @@
 <module name="Checker">
   <property name="charset" value="UTF-8" />
 
+  <module name="SuppressionFilter">
+    <property name="file" value="${suppressions}" />
+  </module>
+
   <module name="TreeWalker">
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index e6dc7deff2b..dbbe98633ae 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch;
 
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -613,7 +614,8 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
        RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
        TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class, org.elasticsearch.indices.TypeMissingException::new, 137),
        FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
-        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141);
+        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141),
+        NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class, ShardStateAction.NoLongerPrimaryShardException::new, 142);
 
        final Class<? extends ElasticsearchException> exceptionClass;
        final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
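The NO_LONGER_PRIMARY_SHARD_EXCEPTION entry above binds the new exception to the stable wire id 142. Ids are append-only and never reused (note the gap at 138-139 that stays unused), because peers on older versions still send the old numbers. The registry idea, reduced to a self-contained sketch with illustrative stand-in types, not the actual ElasticsearchException internals:

    import java.io.DataInput;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    // Illustrative stand-in for deserializing an exception from a stream.
    interface ExceptionReader {
        Exception read(DataInput in) throws IOException;
    }

    final class ExceptionRegistry {
        // id -> reader; ids are append-only and never reused, because peers
        // on older versions still send the old numbers over the wire.
        private static final Map<Integer, ExceptionReader> BY_ID = new HashMap<>();

        static void register(int id, ExceptionReader reader) {
            if (BY_ID.putIfAbsent(id, reader) != null) {
                throw new IllegalArgumentException("duplicate exception id " + id);
            }
        }

        static Exception read(int id, DataInput in) throws IOException {
            ExceptionReader reader = BY_ID.get(id);
            if (reader == null) {
                throw new IllegalArgumentException("unknown exception id " + id);
            }
            return reader.read(in);
        }
    }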
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index e55800682dd..330ce5eb99e 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -254,7 +254,11 @@ public class Version {
     public static final int V_1_7_3_ID = 1070399;
     public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
     public static final int V_1_7_4_ID = 1070499;
-    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final int V_1_7_5_ID = 1070599;
+    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final int V_1_7_6_ID = 1070699;
+    public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
     public static final int V_2_0_0_beta1_ID = 2000001;
     public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -275,9 +279,13 @@ public class Version {
     public static final int V_2_1_1_ID = 2010199;
     public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_1_2_ID = 2010299;
-    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final int V_2_1_3_ID = 2010399;
+    public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_2_0_ID = 2020099;
-    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
+    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_0);
+    public static final int V_2_2_1_ID = 2020199;
+    public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
     public static final int V_2_3_0_ID = 2030099;
     public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
     public static final int V_3_0_0_ID = 3000099;
@@ -299,8 +307,12 @@ public class Version {
                 return V_3_0_0;
             case V_2_3_0_ID:
                 return V_2_3_0;
+            case V_2_2_1_ID:
+                return V_2_2_1;
             case V_2_2_0_ID:
                 return V_2_2_0;
+            case V_2_1_3_ID:
+                return V_2_1_3;
             case V_2_1_2_ID:
                 return V_2_1_2;
             case V_2_1_1_ID:
@@ -321,6 +333,10 @@ public class Version {
                 return V_2_0_0_beta2;
             case V_2_0_0_beta1_ID:
                 return V_2_0_0_beta1;
+            case V_1_7_6_ID:
+                return V_1_7_6;
+            case V_1_7_5_ID:
+                return V_1_7_5;
             case V_1_7_4_ID:
                 return V_1_7_4;
             case V_1_7_3_ID:
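The id constants above follow a positional scheme: V_1_7_5_ID = 1070599 encodes major 1, minor 7, revision 5, with a trailing 99 marking a release build, while betas use low build numbers (2000001 for 2.0.0.beta1). A sketch of the decoding, assuming that scheme:

    // Decodes ids like 2010399 -> 2.1.3 (trailing 99 marks a release build).
    final class VersionId {
        static String describe(int id) {
            int major = id / 1000000;
            int minor = (id / 10000) % 100;
            int revision = (id / 100) % 100;
            int build = id % 100;
            return major + "." + minor + "." + revision + (build == 99 ? "" : "-beta" + build);
        }

        public static void main(String[] args) {
            System.out.println(describe(1070599)); // 1.7.5
            System.out.println(describe(2010399)); // 2.1.3
            System.out.println(describe(2000001)); // 2.0.0-beta1
        }
    }

Because ids are totally ordered integers, version comparisons reduce to integer comparisons, and fromId's switch simply maps each known id back to its constant.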
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java
index 8f7fce89c09..302bdafc471 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java
@@ -58,7 +58,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardFlushRequest, ShardFlushRequest, ReplicationResponse> {
 
     @Override
-    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
+    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) {
         IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.flush(shardRequest.getRequest());
         logger.trace("{} flush request executed on primary", indexShard.shardId());
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java
index 7c9979e7374..2dd41f7801d 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java
@@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> {
 
     @Override
-    protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
+    protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) {
         IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on primary", indexShard.shardId());
diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
index 33bf3547d0b..fdd018c51f2 100644
--- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
@@ -140,7 +140,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexRequest, IndexRequest, IndexResponse> {
 
     @Override
-    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {
+    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {
 
         // validate, if routing is required, that we got routing
         IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
@@ -200,7 +200,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexRequest, IndexRequest, IndexResponse> {
      */
-    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Throwable {
+    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception {
         Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         final ShardId shardId = indexShard.shardId();
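executeIndexRequestOnPrimary now declares throws Exception, and one of its checked paths is the dynamic-mapping update that must reach the master before indexing proceeds. A sketch of that primary-side flow, using stand-in interfaces rather than the real Engine and MappingUpdatedAction types:

    // Illustrative sketch of the primary-side flow; these interfaces are
    // stand-ins, not the actual Elasticsearch types.
    interface Mapping {}

    interface Operation {
        Mapping dynamicMappingsUpdate(); // null when no new fields were seen
    }

    interface Shard {
        void index(Operation op) throws Exception;
    }

    interface MappingService {
        void updateOnMasterSynchronously(Mapping update) throws Exception;
    }

    final class PrimaryIndexer {
        static void indexOnPrimary(Operation op, Shard shard, MappingService mappings) throws Exception {
            Mapping update = op.dynamicMappingsUpdate();
            if (update != null) {
                // Publish the new fields cluster-wide before the document is
                // indexed, so replicas can parse what the primary forwards.
                mappings.updateOnMasterSynchronously(update);
            }
            shard.index(op);
        }
    }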
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java
index 4337d0ee165..c7c0822f04a 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java
@@ -22,31 +22,24 @@ package org.elasticsearch.action.ingest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
-import org.elasticsearch.ingest.core.PipelineFactoryError;
-import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-public class SimulatePipelineResponse extends ActionResponse implements StatusToXContent {
+public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
     private String pipelineId;
     private boolean verbose;
     private List<SimulateDocumentResult> results;
-    private PipelineFactoryError error;
 
     public SimulatePipelineResponse() {
    }
 
-    public SimulatePipelineResponse(PipelineFactoryError error) {
-        this.error = error;
-    }
-
    public SimulatePipelineResponse(String pipelineId, boolean verbose, List<SimulateDocumentResult> responses) {
         this.pipelineId = pipelineId;
         this.verbose = verbose;
@@ -65,69 +58,42 @@ public class SimulatePipelineResponse extends ActionResponse implements StatusToXContent {
         return verbose;
     }
 
-    public boolean isError() {
-        return error != null;
-    }
-
-    @Override
-    public RestStatus status() {
-        if (isError()) {
-            return RestStatus.BAD_REQUEST;
-        }
-        return RestStatus.OK;
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeBoolean(isError());
-        if (isError()) {
-            error.writeTo(out);
-        } else {
-            out.writeString(pipelineId);
-            out.writeBoolean(verbose);
-            out.writeVInt(results.size());
-            for (SimulateDocumentResult response : results) {
-                response.writeTo(out);
-            }
+        out.writeString(pipelineId);
+        out.writeBoolean(verbose);
+        out.writeVInt(results.size());
+        for (SimulateDocumentResult response : results) {
+            response.writeTo(out);
         }
     }
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        boolean isError = in.readBoolean();
-        if (isError) {
-            error = new PipelineFactoryError();
-            error.readFrom(in);
-        } else {
-            this.pipelineId = in.readString();
-            boolean verbose = in.readBoolean();
-            int responsesLength = in.readVInt();
-            results = new ArrayList<>();
-            for (int i = 0; i < responsesLength; i++) {
-                SimulateDocumentResult simulateDocumentResult;
-                if (verbose) {
-                    simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
-                } else {
-                    simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
-                }
-                results.add(simulateDocumentResult);
+        this.pipelineId = in.readString();
+        boolean verbose = in.readBoolean();
+        int responsesLength = in.readVInt();
+        results = new ArrayList<>();
+        for (int i = 0; i < responsesLength; i++) {
+            SimulateDocumentResult simulateDocumentResult;
+            if (verbose) {
+                simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
+            } else {
+                simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
             }
+            results.add(simulateDocumentResult);
         }
     }
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        if (isError()) {
-            error.toXContent(builder, params);
-        } else {
-            builder.startArray(Fields.DOCUMENTS);
-            for (SimulateDocumentResult response : results) {
-                response.toXContent(builder, params);
-            }
-            builder.endArray();
+        builder.startArray(Fields.DOCUMENTS);
+        for (SimulateDocumentResult response : results) {
+            response.toXContent(builder, params);
         }
+        builder.endArray();
         return builder;
     }
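With the error branch gone, SimulatePipelineResponse has a single wire shape: pipeline id, verbose flag, then a length-prefixed list whose per-item encoding depends on the flag. The property worth preserving is that readFrom exactly reverses writeTo; a round-trip sketch over plain Java streams (stand-ins for StreamInput/StreamOutput):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Round-trips the simplified wire shape: id, verbose flag, count, items.
    final class RoundTrip {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeUTF("my-pipeline");
                out.writeBoolean(true);  // verbose
                out.writeInt(2);         // number of results
                out.writeUTF("result-1");
                out.writeUTF("result-2");
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                String pipelineId = in.readUTF();
                boolean verbose = in.readBoolean();
                int count = in.readInt();
                // the verbose flag decides which concrete reader handles each item
                for (int i = 0; i < count; i++) {
                    System.out.println((verbose ? "verbose " : "base ") + in.readUTF());
                }
                System.out.println("pipeline: " + pipelineId);
            }
        }
    }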
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java
index 3d6586315ad..4f9a219c8ad 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action.ingest;
 
+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -27,8 +28,6 @@
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.ingest.PipelineStore;
-import org.elasticsearch.ingest.core.PipelineFactoryError;
-import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
 import org.elasticsearch.node.service.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -58,9 +57,6 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<SimulatePipelineRequest, SimulatePipelineResponse> {
diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/WritePipelineResponseRestListener.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/WritePipelineResponseRestListener.java
deleted file mode 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/ingest/WritePipelineResponseRestListener.java
+++ /dev/null
-
-    public WritePipelineResponseRestListener(RestChannel channel) {
-        super(channel);
-    }
-
-    @Override
-    protected void addCustomFields(XContentBuilder builder, WritePipelineResponse response) throws IOException {
-        if (!response.isAcknowledged()) {
-            response.getError().toXContent(builder, null);
-        }
-    }
-}
-
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
index ed23017410e..4e6ec3c3584 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
@@ -55,6 +55,8 @@ public abstract class ReplicationRequest
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
--- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
-    protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable;
+    protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception;
 
     /**
      * Replica operation on nodes with replica copies
@@ -299,7 +301,7 @@ public abstract class TransportReplicationAction
             TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
             transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
         }
@@ -353,6 +354,7 @@ public abstract class TransportReplicationAction
      * Note that as soon as we move to replication action, state responsibility is transferred to {@link ReplicationPhase}.
      */
-    final class PrimaryPhase extends AbstractRunnable {
+    class PrimaryPhase extends AbstractRunnable {
         private final Request request;
+        private final ShardId shardId;
         private final TransportChannel channel;
         private final ClusterState state;
         private final AtomicBoolean finished = new AtomicBoolean();
-        private Releasable indexShardReference;
+        private IndexShardReference indexShardReference;
 
         PrimaryPhase(Request request, TransportChannel channel) {
             this.state = clusterService.state();
             this.request = request;
+            assert request.shardId() != null : "request shardId must be set prior to primary phase";
+            this.shardId = request.shardId();
             this.channel = channel;
         }
 
         @Override
         public void onFailure(Throwable e) {
+            if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
+                }
+            } else {
+                if (logger.isDebugEnabled()) {
+                    logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
+                }
+            }
             finishAsFailed(e);
         }
 
         @Override
         protected void doRun() throws Exception {
             // request shardID was set in ReroutePhase
-            assert request.shardId() != null : "request shardID must be set prior to primary phase";
-            final ShardId shardId = request.shardId();
             final String writeConsistencyFailure = checkWriteConsistency(shardId);
             if (writeConsistencyFailure != null) {
                 finishBecauseUnavailable(shardId, writeConsistencyFailure);
                 return;
             }
-            final ReplicationPhase replicationPhase;
-            try {
-                indexShardReference = getIndexShardOperationsCounter(shardId);
+            // closed in finishAsFailed(e) in the case of error
+            indexShardReference = getIndexShardReferenceOnPrimary(shardId);
+            if (indexShardReference.isRelocated() == false) {
+                // execute locally
                 Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
                 if (logger.isTraceEnabled()) {
                     logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
                 }
-                replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
-            } catch (Throwable e) {
-                if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
-                    if (logger.isTraceEnabled()) {
-                        logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
-                    }
-                } else {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
-                    }
-                }
-                finishAsFailed(e);
-                return;
+                ReplicationPhase replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
+                finishAndMoveToReplication(replicationPhase);
+            } else {
+                // delegate primary phase to relocation target
+                // it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
+                // phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
+                final ShardRouting primary = indexShardReference.routingEntry();
+                indexShardReference.close();
+                assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
+                DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
+                transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
+                    TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
+                        "rerouting indexing to target primary " + primary));
             }
-            finishAndMoveToReplication(replicationPhase);
         }
 
         /**
@@ -723,10 +745,24 @@ public abstract class TransportReplicationAction
         private final List<ShardRouting> shards;
         private final DiscoveryNodes nodes;
         private final boolean executeOnReplica;
-        private final String indexUUID;
         private final AtomicBoolean finished = new AtomicBoolean();
         private final AtomicInteger success = new AtomicInteger(1); // We already wrote into the primary shard
         private final ConcurrentMap<String, Throwable> shardReplicaFailures = ConcurrentCollections.newConcurrentMap();
         private final AtomicInteger pending;
         private final int totalShards;
-        private final Releasable indexShardReference;
+        private final IndexShardReference indexShardReference;
 
         public ReplicationPhase(ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
-                                TransportChannel channel, Releasable indexShardReference) {
+                                TransportChannel channel, IndexShardReference indexShardReference) {
             this.replicaRequest = replicaRequest;
             this.channel = channel;
             this.finalResponse = finalResponse;
@@ -768,7 +803,6 @@ public abstract class TransportReplicationAction
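The hand-off above is the heart of the PrimaryPhase change: if the shard reference reports that the primary has relocated, the request is re-dispatched to the relocation target instead of failing. A sketch of that control flow; these interfaces are illustrative stand-ins for IndexShardReference and the transport layer, not the actual API:

    // Stand-in for the IndexShardReference used in the diff above.
    interface ShardReference extends AutoCloseable {
        boolean isRelocated();
        String relocationTargetNode(); // assumed accessor, for illustration
        @Override void close();
    }

    final class PrimaryHandoff {
        interface Transport { void forwardToPrimary(String nodeId); }

        static void execute(ShardReference ref, Transport transport, Runnable localPrimaryPhase) {
            if (ref.isRelocated() == false) {
                localPrimaryPhase.run();      // operate on the local primary
            } else {
                String target = ref.relocationTargetNode();
                ref.close();                  // release the local reference first
                // No more in-flight operations use the local copy as primary, so
                // the relocation target is authoritative; re-dispatch there.
                transport.forwardToPrimary(target);
            }
        }
    }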
diff --git a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java
--- a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java
+++ b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java
+    public static final Setting<Type> TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER);
+    // object pages are less useful to us so we give them a lower weight by default
+    public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER);
 
     private final Recycler<byte[]> bytePage;
     private final Recycler<int[]> intPage;
@@ -73,8 +79,8 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
     @Inject
     public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
         super(settings);
-        final Type type = Type.parse(settings.get(TYPE));
-        final long limit = settings.getAsMemory(LIMIT_HEAP, "10%").bytes();
+        final Type type = TYPE_SETTING.get(settings);
+        final long limit = LIMIT_HEAP_SETTING.get(settings).bytes();
         final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
         final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
@@ -91,11 +97,10 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
         // to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
         // that would need to be addressed such as garbage collection of native memory or safety
         // of Unsafe writes.
-        final double bytesWeight = settings.getAsDouble(WEIGHT + ".bytes", 1d);
-        final double intsWeight = settings.getAsDouble(WEIGHT + ".ints", 1d);
-        final double longsWeight = settings.getAsDouble(WEIGHT + ".longs", 1d);
-        // object pages are less useful to us so we give them a lower weight by default
-        final double objectsWeight = settings.getAsDouble(WEIGHT + ".objects", 0.1d);
+        final double bytesWeight = WEIGHT_BYTES_SETTING.get(settings);
+        final double intsWeight = WEIGHT_INT_SETTING.get(settings);
+        final double longsWeight = WEIGHT_LONG_SETTING.get(settings);
+        final double objectsWeight = WEIGHT_OBJECTS_SETTING.get(settings);
 
         final double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;
         final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / BigArrays.PAGE_SIZE_IN_BYTES);
@@ -190,7 +195,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
         return recycler;
     }
 
-    public static enum Type {
+    public enum Type {
         QUEUE {
             @Override
             <V> Recycler<V> build(Recycler.C<V> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
@@ -211,9 +216,6 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
         };
 
         public static Type parse(String type) {
-            if (Strings.isNullOrEmpty(type)) {
-                return CONCURRENT;
-            }
             try {
                 return Type.valueOf(type.toUpperCase(Locale.ROOT));
             } catch (IllegalArgumentException e) {
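The PageCacheRecycler hunk replaces ad-hoc settings.getAsDouble(...) lookups with typed Setting constants that carry key, default, and parser in one declaration, which is also why the null/empty fallback in Type.parse could be deleted: the default now lives on the setting itself. A simplified stand-in for that pattern (the real org.elasticsearch.common.settings.Setting has more machinery, such as scopes and dynamic updates):

    import java.util.Map;
    import java.util.function.Function;

    // Simplified stand-in for the typed Setting pattern used above.
    final class Setting<T> {
        private final String key;
        private final String defaultValue;
        private final Function<String, T> parser;

        Setting(String key, String defaultValue, Function<String, T> parser) {
            this.key = key;
            this.defaultValue = defaultValue;
            this.parser = parser;
        }

        T get(Map<String, String> settings) {
            // default and parsing are applied in exactly one place
            return parser.apply(settings.getOrDefault(key, defaultValue));
        }

        static Setting<Double> doubleSetting(String key, double defaultValue) {
            return new Setting<>(key, Double.toString(defaultValue), Double::parseDouble);
        }
    }

Every consumer then reads the value the same way, e.g. WEIGHT_BYTES_SETTING.get(settings), instead of repeating key strings and defaults at each call site.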
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
index e5d3f06f1ec..be9381a7fe6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
@@ -123,6 +123,11 @@ public interface ClusterStateTaskExecutor<T> {
             return this == SUCCESS;
         }
 
+        public Throwable getFailure() {
+            assert !isSuccess();
+            return failure;
+        }
+
         /**
          * Handle the execution result with the provided consumers
          * @param onSuccess handler to invoke on success
diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
index 9e57fe3a48a..0e61712b010 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
@@ -94,7 +94,7 @@ public class MappingUpdatedAction extends AbstractComponent {
         }
     }
 
-    public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
+    public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
         updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null);
     }
 
@@ -102,7 +102,7 @@ public class MappingUpdatedAction extends AbstractComponent {
      * Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)}
      * using the default timeout.
      */
-    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
+    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
         updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
     }
 
@@ -111,7 +111,7 @@ public class MappingUpdatedAction extends AbstractComponent {
      * {@code timeout}. When this method returns successfully mappings have
     * been applied to the master node and propagated to data nodes.
      */
-    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable {
+    public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
         if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
             throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index 4aca9a4e235..fa703881bb2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.cluster.action.shard;
 
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
@@ -28,8 +29,9 @@
 import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.ClusterStateTaskListener;
 import org.elasticsearch.cluster.MasterNodeChangePredicate;
 import org.elasticsearch.cluster.NotMasterException;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingService;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -46,6 +48,7 @@
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.ConnectTransportException;
@@ -60,6 +63,7 @@
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -125,17 +129,22 @@ public class ShardStateAction extends AbstractComponent {
         return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null;
     }
 
-    public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
+    /**
+     * Send a shard failed request to the master node to update the
+     * cluster state.
+     *
+     * @param shardRouting       the shard to fail
+     * @param sourceShardRouting the source shard requesting the failure (must be the shard itself, or the primary shard)
+     * @param message            the reason for the failure
+     * @param failure            the underlying cause of the failure
+     * @param listener           callback upon completion of the request
+     */
+    public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Throwable failure, Listener listener) {
         ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
-        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
+        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, sourceShardRouting, message, failure);
         sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener);
     }
 
-    public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
-        logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
-        shardFailed(shardRouting, indexUUID, message, failure, listener);
-    }
-
     // visible for testing
     protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) {
         observer.waitForNextChange(new ClusterStateObserver.Listener() {
@@ -231,15 +240,15 @@ public class ShardStateAction extends AbstractComponent {
 
             // partition tasks into those that correspond to shards
             // that exist versus do not exist
-            Map<Boolean, List<ShardRoutingEntry>> partition =
-                tasks.stream().collect(Collectors.partitioningBy(task -> shardExists(currentState, task)));
+            Map<ValidationResult, List<ShardRoutingEntry>> partition =
+                tasks.stream().collect(Collectors.groupingBy(task -> validateTask(currentState, task)));
 
             // tasks that correspond to non-existent shards are marked
             // as successful
-            batchResultBuilder.successes(partition.get(false));
+            batchResultBuilder.successes(partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList()));
 
             ClusterState maybeUpdatedState = currentState;
-            List<ShardRoutingEntry> tasksToFail = partition.get(true);
+            List<ShardRoutingEntry> tasksToFail = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList());
             try {
                 List<FailedRerouteAllocation.FailedShard> failedShards = tasksToFail
@@ -257,6 +266,15 @@ public class ShardStateAction extends AbstractComponent {
                 batchResultBuilder.failures(tasksToFail, t);
             }
 
+            partition
+                .getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList())
+                .forEach(task -> batchResultBuilder.failure(
+                    task,
+                    new NoLongerPrimaryShardException(
+                        task.getShardRouting().shardId(),
+                        "source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation")
+                ));
+
             return batchResultBuilder.build(maybeUpdatedState);
         }
 
@@ -265,17 +283,36 @@ public class ShardStateAction extends AbstractComponent {
             return allocationService.applyFailedShards(currentState, failedShards);
         }
 
-        private boolean shardExists(ClusterState currentState, ShardRoutingEntry task) {
+        private enum ValidationResult {
+            VALID,
+            SOURCE_INVALID,
+            SHARD_MISSING
+        }
+
+        private ValidationResult validateTask(ClusterState currentState, ShardRoutingEntry task) {
+
+            // non-local requests
+            if (!task.shardRouting.isSameAllocation(task.sourceShardRouting)) {
+                IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(task.shardRouting.shardId());
+                if (indexShard == null) {
+                    return ValidationResult.SOURCE_INVALID;
+                }
+                ShardRouting primaryShard = indexShard.primaryShard();
+                if (primaryShard == null || !primaryShard.isSameAllocation(task.sourceShardRouting)) {
+                    return ValidationResult.SOURCE_INVALID;
+                }
+            }
+
             RoutingNodes.RoutingNodeIterator routingNodeIterator =
                 currentState.getRoutingNodes().routingNodeIter(task.getShardRouting().currentNodeId());
             if (routingNodeIterator != null) {
                 for (ShardRouting maybe : routingNodeIterator) {
                     if (task.getShardRouting().isSameAllocation(maybe)) {
-                        return true;
+                        return ValidationResult.VALID;
                     }
                 }
             }
-            return false;
+            return ValidationResult.SHARD_MISSING;
         }
 
         @Override
@@ -291,9 +328,9 @@ public class ShardStateAction extends AbstractComponent {
             }
         }
 
-    public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String message, Listener listener) {
+    public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) {
         ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
-        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, null);
+        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, shardRouting, message, null);
         sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener);
     }
 
@@ -360,16 +397,16 @@ public class ShardStateAction extends AbstractComponent {
     public static class ShardRoutingEntry extends TransportRequest {
 
         ShardRouting shardRouting;
-        String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
+        ShardRouting sourceShardRouting;
         String message;
         Throwable failure;
 
         public ShardRoutingEntry() {
         }
 
-        ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) {
+        ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Throwable failure) {
             this.shardRouting = shardRouting;
-            this.indexUUID = indexUUID;
+            this.sourceShardRouting = sourceShardRouting;
             this.message = message;
             this.failure = failure;
         }
@@ -382,7 +419,7 @@ public class ShardStateAction extends AbstractComponent {
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             shardRouting = readShardRoutingEntry(in);
-            indexUUID = in.readString();
+            sourceShardRouting = readShardRoutingEntry(in);
             message = in.readString();
             failure = in.readThrowable();
         }
@@ -391,18 +428,25 @@ public class ShardStateAction extends AbstractComponent {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             shardRouting.writeTo(out);
-            out.writeString(indexUUID);
+            sourceShardRouting.writeTo(out);
             out.writeString(message);
             out.writeThrowable(failure);
         }
 
         @Override
         public String toString() {
-            return "" + shardRouting + ", indexUUID [" + indexUUID + "], message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]";
+            return String.format(
+                Locale.ROOT,
+                "failed shard [%s], source shard [%s], message [%s], failure [%s]",
+                shardRouting,
+                sourceShardRouting,
+                message,
+                ExceptionsHelper.detailedMessage(failure));
         }
     }
 
     public interface Listener {
+
         default void onSuccess() {
         }
@@ -423,6 +467,20 @@ public class ShardStateAction extends AbstractComponent {
          */
         default void onFailure(final Throwable t) {
         }
+
     }
+
+    public static class NoLongerPrimaryShardException extends ElasticsearchException {
+
+        public NoLongerPrimaryShardException(ShardId shardId, String msg) {
+            super(msg);
+            setShard(shardId);
+        }
+
+        public NoLongerPrimaryShardException(StreamInput in) throws IOException {
+            super(in);
+        }
+
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 6d81556eb2c..58e3ed6b644 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -43,6 +43,7 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.function.Predicate;
 
 /**
@@ -137,6 +138,13 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
+    public IndexShardRoutingTable shardRoutingTableOrNull(ShardId shardId) {
+        return Optional
+            .ofNullable(index(shardId.getIndex()))
+            .flatMap(irt -> Optional.ofNullable(irt.shard(shardId.getId())))
+            .orElse(null);
+    }
+
     public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
         RoutingTableValidation validation = validate(metaData);
         if (!validation.valid()) {
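The executor above moves from a boolean partitioningBy on shardExists to a three-way groupingBy over ValidationResult, which is why each bucket is read with getOrDefault: unlike partitioningBy, groupingBy omits keys that never occur. The idiom in isolation, with the task type reduced to a stub:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    final class TaskPartitioning {
        enum ValidationResult { VALID, SOURCE_INVALID, SHARD_MISSING }

        static final class Task {
            final ValidationResult validation;
            Task(ValidationResult validation) { this.validation = validation; }
        }

        public static void main(String[] args) {
            List<Task> tasks = new ArrayList<>();
            tasks.add(new Task(ValidationResult.VALID));
            tasks.add(new Task(ValidationResult.SHARD_MISSING));

            // groupingBy only creates buckets for values that actually occur,
            // hence the getOrDefault fallbacks below.
            Map<ValidationResult, List<Task>> partition =
                    tasks.stream().collect(Collectors.groupingBy(t -> t.validation));

            List<Task> valid = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList());
            List<Task> missing = partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList());
            List<Task> badSource = partition.getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList());
            System.out.println(valid.size() + " valid, " + missing.size() + " missing, " + badSource.size() + " rejected");
        }
    }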
diff --git a/core/src/main/java/org/elasticsearch/common/cli/CheckFileCommand.java b/core/src/main/java/org/elasticsearch/common/cli/CheckFileCommand.java
index 9635ae5e3ec..95502f16700 100644
--- a/core/src/main/java/org/elasticsearch/common/cli/CheckFileCommand.java
+++ b/core/src/main/java/org/elasticsearch/common/cli/CheckFileCommand.java
@@ -100,8 +100,9 @@ public abstract class CheckFileCommand extends CliTool.Command {
                     Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
                     Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
                     if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
-                        terminal.printWarn("The file permissions of [%s] have changed from [%s] to [%s]",
-                                entry.getKey(), PosixFilePermissions.toString(permissionsBeforeWrite), PosixFilePermissions.toString(permissionsAfterWrite));
+                        terminal.printWarn("The file permissions of [" + entry.getKey() + "] have changed "
+                                + "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
+                                + "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
                         terminal.printWarn("Please ensure that the user account running Elasticsearch has read access to this file!");
                     }
                 }
@@ -115,7 +116,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
                     String ownerBeforeWrite = entry.getValue();
                     String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
                     if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
-                        terminal.printWarn("WARN: Owner of file [%s] used to be [%s], but now is [%s]", entry.getKey(), ownerBeforeWrite, ownerAfterWrite);
+                        terminal.printWarn("WARN: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
                     }
                 }
 
@@ -128,7 +129,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
                     String groupBeforeWrite = entry.getValue();
                     String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
                     if (!groupAfterWrite.equals(groupBeforeWrite)) {
-                        terminal.printWarn("WARN: Group of file [%s] used to be [%s], but now is [%s]", entry.getKey(), groupBeforeWrite, groupAfterWrite);
+                        terminal.printWarn("WARN: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
                     }
                 }
 
diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java b/core/src/main/java/org/elasticsearch/common/cli/CliTool.java
index 0d32b6e2779..17994eba439 100644
--- a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java
+++ b/core/src/main/java/org/elasticsearch/common/cli/CliTool.java
@@ -19,14 +19,17 @@
 
 package org.elasticsearch.common.cli;
 
+import org.apache.commons.cli.AlreadySelectedException;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.MissingOptionException;
+import org.apache.commons.cli.UnrecognizedOptionException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
 
-import java.io.IOException;
 import java.util.Locale;
 
 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@@ -50,7 +53,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 public abstract class CliTool {
 
     // based on sysexits.h
-    public static enum ExitStatus {
+    public enum ExitStatus {
         OK(0),
         OK_AND_EXIT(0),
         USAGE(64),          /* command line usage error */
@@ -69,23 +72,13 @@ public abstract class CliTool {
 
         final int status;
 
-        private ExitStatus(int status) {
+        ExitStatus(int status) {
             this.status = status;
         }
 
         public int status() {
             return status;
         }
-
-        public static ExitStatus fromStatus(int status) {
-            for (ExitStatus exitStatus : values()) {
-                if (exitStatus.status() == status) {
-                    return exitStatus;
-                }
-            }
-
-            return null;
-        }
     }
 
     protected final Terminal terminal;
@@ -108,7 +101,7 @@ public abstract class CliTool {
             settings = env.settings();
     }
 
-    public final ExitStatus execute(String... args) {
+    public final ExitStatus execute(String... args) throws Exception {
 
         // first lets see if the user requests tool help. We're doing it only if
         // this is a multi-command tool. If it's a single command tool, the -h/--help
@@ -132,7 +125,7 @@ public abstract class CliTool {
             String cmdName = args[0];
             cmd = config.cmd(cmdName);
             if (cmd == null) {
-                terminal.printError("unknown command [%s]. Use [-h] option to list available commands", cmdName);
+                terminal.printError("unknown command [" + cmdName + "]. Use [-h] option to list available commands");
                 return ExitStatus.USAGE;
             }
@@ -146,23 +139,11 @@ public abstract class CliTool {
             }
         }
 
-        Command command = null;
         try {
-
-            command = parse(cmd, args);
-            return command.execute(settings, env);
-        } catch (IOException ioe) {
-            terminal.printError(ioe);
-            return ExitStatus.IO_ERROR;
-        } catch (IllegalArgumentException ilae) {
-            terminal.printError(ilae);
-            return ExitStatus.USAGE;
-        } catch (Throwable t) {
-            terminal.printError(t);
-            if (command == null) {
-                return ExitStatus.USAGE;
-            }
-            return ExitStatus.CODE_ERROR;
+            return parse(cmd, args).execute(settings, env);
+        } catch (UserError error) {
+            terminal.printError(error.getMessage());
+            return error.exitStatus;
         }
     }
 
@@ -177,7 +158,13 @@ public abstract class CliTool {
         if (cli.hasOption("h")) {
             return helpCmd(cmd);
         }
-        cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
+        try {
+            cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
+        } catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) {
+            // intentionally drop the stack trace here as these are really user errors,
+            // the stack trace into cli parsing lib is not important
+            throw new UserError(ExitStatus.USAGE, e.toString());
+        }
         Terminal.Verbosity verbosity = Terminal.Verbosity.resolve(cli);
         terminal.verbosity(verbosity);
         return parse(cmd.name(), cli);
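After this change, CliTool.execute only catches UserError; anything else propagates to the caller instead of being squashed into CODE_ERROR. A self-contained sketch of that control flow, with ExitStatus reduced to the two values used here:

    final class CliSketch {
        enum ExitStatus { OK, USAGE }

        static class UserError extends Exception {
            final ExitStatus exitStatus;
            UserError(ExitStatus exitStatus, String msg) {
                super(msg);
                this.exitStatus = exitStatus;
            }
        }

        static ExitStatus execute(String... args) {
            try {
                if (args.length == 0) {
                    // user-fixable problem: no stack trace, just a message and a status
                    throw new UserError(ExitStatus.USAGE, "no command given");
                }
                return ExitStatus.OK;
            } catch (UserError e) {
                System.err.println("ERROR: " + e.getMessage());
                return e.exitStatus;
            }
        }

        public static void main(String[] args) {
            System.exit(execute(args) == ExitStatus.OK ? 0 : 64);
        }
    }

The design point is the split: user mistakes get a clean message and a sysexits-style status, while programming errors keep their stack traces for debugging.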
diff --git a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java b/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java
index 4f694e9af38..5a463258eb1 100644
--- a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java
+++ b/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java
@@ -50,7 +50,7 @@ public class HelpPrinter {
                 }
             });
         } catch (IOException ioe) {
-            ioe.printStackTrace(terminal.writer());
+            throw new RuntimeException(ioe);
         }
         terminal.println();
     }
diff --git a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java b/core/src/main/java/org/elasticsearch/common/cli/Terminal.java
index 82898b3e457..68229f69c33 100644
--- a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java
+++ b/core/src/main/java/org/elasticsearch/common/cli/Terminal.java
@@ -35,8 +35,6 @@
 import java.util.Locale;
 
 @SuppressForbidden(reason = "System#out")
 public abstract class Terminal {
 
-    public static final String DEBUG_SYSTEM_PROPERTY = "es.cli.debug";
-
     public static final Terminal DEFAULT = ConsoleTerminal.supported() ? new ConsoleTerminal() : new SystemTerminal();
 
     public static enum Verbosity {
@@ -64,7 +62,6 @@ public abstract class Terminal {
     }
 
     private Verbosity verbosity = Verbosity.NORMAL;
-    private final boolean isDebugEnabled;
 
     public Terminal() {
         this(Verbosity.NORMAL);
@@ -72,7 +69,6 @@ public abstract class Terminal {
 
     public Terminal(Verbosity verbosity) {
         this.verbosity = verbosity;
-        this.isDebugEnabled = "true".equals(System.getProperty(DEBUG_SYSTEM_PROPERTY, "false"));
     }
 
     public void verbosity(Verbosity verbosity) {
@@ -93,46 +89,37 @@ public abstract class Terminal {
         println(Verbosity.NORMAL);
     }
 
-    public void println(String msg, Object... args) {
-        println(Verbosity.NORMAL, msg, args);
+    public void println(String msg) {
+        println(Verbosity.NORMAL, msg);
     }
 
-    public void print(String msg, Object... args) {
-        print(Verbosity.NORMAL, msg, args);
+    public void print(String msg) {
+        print(Verbosity.NORMAL, msg);
     }
 
     public void println(Verbosity verbosity) {
         println(verbosity, "");
     }
 
-    public void println(Verbosity verbosity, String msg, Object... args) {
-        print(verbosity, msg + System.lineSeparator(), args);
+    public void println(Verbosity verbosity, String msg) {
+        print(verbosity, msg + System.lineSeparator());
     }
 
-    public void print(Verbosity verbosity, String msg, Object... args) {
+    public void print(Verbosity verbosity, String msg) {
         if (this.verbosity.enabled(verbosity)) {
-            doPrint(msg, args);
+            doPrint(msg);
         }
     }
 
-    public void printError(String msg, Object... args) {
-        println(Verbosity.SILENT, "ERROR: " + msg, args);
+    public void printError(String msg) {
+        println(Verbosity.SILENT, "ERROR: " + msg);
     }
 
-    public void printError(Throwable t) {
-        printError("%s", t.toString());
-        if (isDebugEnabled) {
-            printStackTrace(t);
-        }
+    public void printWarn(String msg) {
+        println(Verbosity.SILENT, "WARN: " + msg);
     }
 
-    public void printWarn(String msg, Object... args) {
-        println(Verbosity.SILENT, "WARN: " + msg, args);
-    }
-
-    protected abstract void doPrint(String msg, Object... args);
-
-    public abstract PrintWriter writer();
+    protected abstract void doPrint(String msg);
 
     private static class ConsoleTerminal extends Terminal {
@@ -143,8 +130,8 @@ public abstract class Terminal {
         }
 
         @Override
-        public void doPrint(String msg, Object... args) {
-            console.printf(msg, args);
+        public void doPrint(String msg) {
+            console.printf("%s", msg);
             console.flush();
         }
 
@@ -158,11 +145,6 @@ public abstract class Terminal {
             return console.readPassword(text, args);
         }
 
-        @Override
-        public PrintWriter writer() {
-            return console.writer();
-        }
-
         @Override
         public void printStackTrace(Throwable t) {
             t.printStackTrace(console.writer());
@@ -175,13 +157,13 @@ public abstract class Terminal {
         private final PrintWriter printWriter = new PrintWriter(System.out);
 
         @Override
-        public void doPrint(String msg, Object... args) {
-            System.out.print(String.format(Locale.ROOT, msg, args));
+        public void doPrint(String msg) {
+            System.out.print(msg);
         }
 
         @Override
         public String readText(String text, Object... args) {
-            print(text, args);
+            print(text);
             BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
             try {
                 return reader.readLine();
@@ -199,10 +181,5 @@ public abstract class Terminal {
         public void printStackTrace(Throwable t) {
             t.printStackTrace(printWriter);
         }
-
-        @Override
-        public PrintWriter writer() {
-            return printWriter;
-        }
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/plugins/loading/classpath/InClassPathPlugin.java b/core/src/main/java/org/elasticsearch/common/cli/UserError.java
similarity index 61%
rename from core/src/test/java/org/elasticsearch/plugins/loading/classpath/InClassPathPlugin.java
rename to core/src/main/java/org/elasticsearch/common/cli/UserError.java
index 79b1f244eff..ad709830885 100644
--- a/core/src/test/java/org/elasticsearch/plugins/loading/classpath/InClassPathPlugin.java
+++ b/core/src/main/java/org/elasticsearch/common/cli/UserError.java
@@ -17,19 +17,19 @@
  * under the License.
  */
 
-package org.elasticsearch.plugins.loading.classpath;
+package org.elasticsearch.common.cli;
 
-import org.elasticsearch.plugins.Plugin;
+/**
+ * An exception representing a user fixable problem in {@link CliTool} usage.
+ */
+public class UserError extends Exception {
 
-public class InClassPathPlugin extends Plugin {
+    /** The exit status the cli should use when catching this user error. */
+    public final CliTool.ExitStatus exitStatus;
 
-    @Override
-    public String name() {
-        return "in-classpath-plugin";
-    }
-
-    @Override
-    public String description() {
-        return "A plugin defined in class path";
+    /** Constructs a UserError with an exit status and message to show the user. */
+    public UserError(CliTool.ExitStatus exitStatus, String msg) {
+        super(msg);
+        this.exitStatus = exitStatus;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java b/core/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java
deleted file mode 100644
index b99ef895430..00000000000
--- a/core/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.http.client;
-
-import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.Build;
-import org.elasticsearch.ElasticsearchCorruptionException;
-import org.elasticsearch.ElasticsearchTimeoutException;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.Base64;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.hash.MessageDigests;
-import org.elasticsearch.common.unit.TimeValue;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintWriter;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.net.URLConnection;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.nio.file.attribute.FileTime;
-import java.util.List;
-
-/**
- *
- */
-public class HttpDownloadHelper {
-
-    private boolean useTimestamp = false;
-    private boolean skipExisting = false;
-
-    public boolean download(URL source, Path dest, @Nullable DownloadProgress progress, TimeValue timeout) throws Exception {
-        if (Files.exists(dest) && skipExisting) {
-            return true;
-        }
-
-        //don't do any progress, unless asked
-        if (progress == null) {
-            progress = new NullProgress();
-        }
-
-        //set the timestamp to the file date.
-        long timestamp = 0;
-
-        boolean hasTimestamp = false;
-        if (useTimestamp && Files.exists(dest) ) {
-            timestamp = Files.getLastModifiedTime(dest).toMillis();
-            hasTimestamp = true;
-        }
-
-        GetThread getThread = new GetThread(source, dest, hasTimestamp, timestamp, progress);
-
-        try {
-            getThread.setDaemon(true);
-            getThread.start();
-            getThread.join(timeout.millis());
-
-            if (getThread.isAlive()) {
-                throw new ElasticsearchTimeoutException("The GET operation took longer than " + timeout + ", stopping it.");
-            }
-        }
-        catch (InterruptedException ie) {
-            return false;
-        } finally {
-            getThread.closeStreams();
-        }
-
-        return getThread.wasSuccessful();
-    }
-
-    public interface Checksummer {
-        /** Return the hex string for the given byte array */
-        String checksum(byte[] filebytes);
-        /** Human-readable name for the checksum format */
-        String name();
-    }
-
-    /** Checksummer for SHA1 */
-    public static Checksummer SHA1_CHECKSUM = new Checksummer() {
-        @Override
-        public String checksum(byte[] filebytes) {
-            return MessageDigests.toHexString(MessageDigests.sha1().digest(filebytes));
-        }
-
-        @Override
-        public String name() {
-            return "SHA1";
-        }
-    };
-
-    /** Checksummer for MD5 */
-    public static Checksummer MD5_CHECKSUM = new Checksummer() {
-        @Override
-        public String checksum(byte[] filebytes) {
-            return MessageDigests.toHexString(MessageDigests.md5().digest(filebytes));
-        }
-
-        @Override
-        public String name() {
-            return "MD5";
-        }
-    };
-
-    /**
-     * Download the given checksum URL to the destination and check the checksum
-     * @param checksumURL URL for the checksum file
-     * @param originalFile original file to calculate checksum of
-     * @param checksumFile destination to download the checksum file to
-     * @param hashFunc class used to calculate the checksum of the file
-     * @return true if the checksum was validated, false if it did not exist
-     * @throws Exception if the checksum failed to match
-     */
-    public boolean downloadAndVerifyChecksum(URL checksumURL, Path originalFile, Path checksumFile,
-                                             @Nullable DownloadProgress progress,
-                                             TimeValue timeout, Checksummer hashFunc) throws Exception {
-        try {
-            if (download(checksumURL, checksumFile, progress, timeout)) {
-                byte[] fileBytes = Files.readAllBytes(originalFile);
-                List<String> checksumLines = Files.readAllLines(checksumFile, StandardCharsets.UTF_8);
-                if (checksumLines.size() != 1) {
-                    throw new ElasticsearchCorruptionException("invalid format for checksum file (" +
-                            hashFunc.name() + "), expected 1 line, got: " + checksumLines.size());
-                }
-                String checksumHex = checksumLines.get(0);
-                String fileHex = hashFunc.checksum(fileBytes);
-                if (fileHex.equals(checksumHex) == false) {
-                    throw new ElasticsearchCorruptionException("incorrect hash (" + hashFunc.name() +
-                            "), file hash: [" + fileHex + "], expected: [" + checksumHex + "]");
-                }
-                return true;
-            }
-        } catch (FileNotFoundException | NoSuchFileException e) {
-            // checksum file doesn't exist
-            return false;
-        } finally {
-            IOUtils.deleteFilesIgnoringExceptions(checksumFile);
-        }
-        return false;
-    }
-
-    /**
-     * Interface implemented for reporting
-     * progress of downloading.
-     */
-    public interface DownloadProgress {
-        /**
-         * begin a download
-         */
-        void beginDownload();
-
-        /**
-         * tick handler
-         */
-        void onTick();
-
-        /**
-         * end a download
-         */
-        void endDownload();
-    }
-
-    /**
-     * do nothing with progress info
-     */
-    public static class NullProgress implements DownloadProgress {
-
-        /**
-         * begin a download
-         */
-        @Override
-        public void beginDownload() {
-
-        }
-
-        /**
-         * tick handler
-         */
-        @Override
-        public void onTick() {
-        }
-
-        /**
-         * end a download
-         */
-        @Override
-        public void endDownload() {
-
-        }
-    }
-
-    /**
-     * verbose progress system prints to some output stream
-     */
-    public static class VerboseProgress implements DownloadProgress {
-        private int dots = 0;
-        // CheckStyle:VisibilityModifier OFF - bc
-        PrintWriter writer;
-        // CheckStyle:VisibilityModifier ON
-
-        /**
-         * Construct a verbose progress reporter.
-         *
-         * @param writer the output stream.
-         */
-        public VerboseProgress(PrintWriter writer) {
-            this.writer = writer;
-        }
-
-        /**
-         * begin a download
-         */
-        @Override
-        public void beginDownload() {
-            writer.print("Downloading ");
-            dots = 0;
-        }
-
-        /**
-         * tick handler
-         */
-        @Override
-        public void onTick() {
-            writer.print(".");
-            if (dots++ > 50) {
-                writer.flush();
-                dots = 0;
-            }
-        }
-
-        /**
-         * end a download
-         */
-        @Override
-        public void endDownload() {
-            writer.println("DONE");
-            writer.flush();
-        }
-    }
-
-    private class GetThread extends Thread {
-
-        private final URL source;
-        private final Path dest;
-        private final boolean hasTimestamp;
-        private final long timestamp;
-        private final DownloadProgress progress;
-
-        private boolean success = false;
-        private IOException ioexception = null;
-        private InputStream is = null;
-        private OutputStream os = null;
-        private URLConnection connection;
-        private int redirections = 0;
-
-        GetThread(URL source, Path dest, boolean h, long t, DownloadProgress p) {
-            this.source = source;
-            this.dest = dest;
-            hasTimestamp = h;
-            timestamp = t;
-            progress = p;
-        }
-
-        @Override
-        public void run() {
-            try {
-                success = get();
-            } catch (IOException ioex) {
-                ioexception = ioex;
-            }
-        }
-
-        private boolean get() throws IOException {
-
-            connection = openConnection(source);
-
-            if (connection == null) {
-                return false;
-            }
-
-            boolean downloadSucceeded = downloadFile();
-
-            //if (and only if) the use file time option is set, then
-            //the saved file now has its timestamp set to that of the
-            //downloaded file
-            if (downloadSucceeded && useTimestamp) {
-                updateTimeStamp();
-            }
-
-            return downloadSucceeded;
-        }
-
-
-        private boolean redirectionAllowed(URL aSource, URL aDest) throws IOException {
-            // Argh, github does this...
-//            if (!(aSource.getProtocol().equals(aDest.getProtocol()) || ("http"
-//                    .equals(aSource.getProtocol()) && "https".equals(aDest
-//                    .getProtocol())))) {
-//                String message = "Redirection detected from "
-//                        + aSource.getProtocol() + " to " + aDest.getProtocol()
-//                        + ". Protocol switch unsafe, not allowed.";
-//                throw new IOException(message);
-//            }
-
-            redirections++;
-            if (redirections > 5) {
-                String message = "More than " + 5 + " times redirected, giving up";
-                throw new IOException(message);
-            }
-
-
-            return true;
-        }
-
-        private URLConnection openConnection(URL aSource) throws IOException {
-
-            // set up the URL connection
-            URLConnection connection = aSource.openConnection();
-            // modify the headers
-            // NB: things like user authentication could go in here too.
- if (hasTimestamp) { - connection.setIfModifiedSince(timestamp); - } - - // in case the plugin manager is its own project, this can become an authenticator - boolean isSecureProcotol = "https".equalsIgnoreCase(aSource.getProtocol()); - boolean isAuthInfoSet = !Strings.isNullOrEmpty(aSource.getUserInfo()); - if (isAuthInfoSet) { - if (!isSecureProcotol) { - throw new IOException("Basic auth is only supported for HTTPS!"); - } - String basicAuth = Base64.encodeBytes(aSource.getUserInfo().getBytes(StandardCharsets.UTF_8)); - connection.setRequestProperty("Authorization", "Basic " + basicAuth); - } - - if (connection instanceof HttpURLConnection) { - ((HttpURLConnection) connection).setInstanceFollowRedirects(false); - connection.setUseCaches(true); - connection.setConnectTimeout(5000); - } - connection.setRequestProperty("ES-Version", Version.CURRENT.toString()); - connection.setRequestProperty("ES-Build-Hash", Build.CURRENT.shortHash()); - connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager"); - - // connect to the remote site (may take some time) - connection.connect(); - - // First check on a 301 / 302 (moved) response (HTTP only) - if (connection instanceof HttpURLConnection) { - HttpURLConnection httpConnection = (HttpURLConnection) connection; - int responseCode = httpConnection.getResponseCode(); - if (responseCode == HttpURLConnection.HTTP_MOVED_PERM || - responseCode == HttpURLConnection.HTTP_MOVED_TEMP || - responseCode == HttpURLConnection.HTTP_SEE_OTHER) { - String newLocation = httpConnection.getHeaderField("Location"); - URL newURL = new URL(newLocation); - if (!redirectionAllowed(aSource, newURL)) { - return null; - } - return openConnection(newURL); - } - // next test for a 304 result (HTTP only) - long lastModified = httpConnection.getLastModified(); - if (responseCode == HttpURLConnection.HTTP_NOT_MODIFIED - || (lastModified != 0 && hasTimestamp && timestamp >= lastModified)) { - // not modified so no file download. just return - // instead and trace out something so the user - // doesn't think that the download happened when it - // didn't - return null; - } - // test for 401 result (HTTP only) - if (responseCode == HttpURLConnection.HTTP_UNAUTHORIZED) { - String message = "HTTP Authorization failure"; - throw new IOException(message); - } - } - - //REVISIT: at this point even non HTTP connections may - //support the if-modified-since behaviour -we just check - //the date of the content and skip the write if it is not - //newer. Some protocols (FTP) don't include dates, of - //course. - return connection; - } - - private boolean downloadFile() throws FileNotFoundException, IOException { - IOException lastEx = null; - for (int i = 0; i < 3; i++) { - // this three attempt trick is to get round quirks in different - // Java implementations. Some of them take a few goes to bind - // property; we ignore the first couple of such failures. - try { - is = connection.getInputStream(); - break; - } catch (IOException ex) { - lastEx = ex; - } - } - if (is == null) { - throw lastEx; - } - - os = Files.newOutputStream(dest); - progress.beginDownload(); - boolean finished = false; - try { - byte[] buffer = new byte[1024 * 100]; - int length; - while (!isInterrupted() && (length = is.read(buffer)) >= 0) { - os.write(buffer, 0, length); - progress.onTick(); - } - finished = !isInterrupted(); - } finally { - if (!finished) { - // we have started to (over)write dest, but failed. - // Try to delete the garbage we'd otherwise leave - // behind. 
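[editor's note] openConnection() disables automatic redirects and walks 301/302/303 responses by hand so it can cap the hop count and keep the conditional-GET (304) handling in one place. A hedged, standalone sketch of that loop (auth headers and timestamp logic omitted):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLConnection;

    final class BoundedRedirects {
        private static final int MAX_REDIRECTS = 5;

        static URLConnection open(URL source) throws IOException {
            URL current = source;
            for (int hops = 0; hops <= MAX_REDIRECTS; hops++) {
                URLConnection connection = current.openConnection();
                if (connection instanceof HttpURLConnection) {
                    HttpURLConnection http = (HttpURLConnection) connection;
                    http.setInstanceFollowRedirects(false);  // we count hops ourselves
                    http.setConnectTimeout(5000);
                    int code = http.getResponseCode();       // implicitly connects
                    if (code == HttpURLConnection.HTTP_MOVED_PERM
                            || code == HttpURLConnection.HTTP_MOVED_TEMP
                            || code == HttpURLConnection.HTTP_SEE_OTHER) {
                        String location = http.getHeaderField("Location");
                        if (location == null) {
                            throw new IOException("redirect without Location header");
                        }
                        current = new URL(current, location); // resolves relative redirects too
                        continue;                             // follow, bounded by the loop
                    }
                    if (code == HttpURLConnection.HTTP_NOT_MODIFIED) {
                        return null;                          // nothing newer to download
                    }
                }
                return connection;
            }
            throw new IOException("More than " + MAX_REDIRECTS + " times redirected, giving up");
        }
    }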
- IOUtils.closeWhileHandlingException(os, is); - IOUtils.deleteFilesIgnoringExceptions(dest); - } else { - IOUtils.close(os, is); - } - } - progress.endDownload(); - return true; - } - - private void updateTimeStamp() throws IOException { - long remoteTimestamp = connection.getLastModified(); - if (remoteTimestamp != 0) { - Files.setLastModifiedTime(dest, FileTime.fromMillis(remoteTimestamp)); - } - } - - /** - * Has the download completed successfully? - *

- * Re-throws any exception caught during execution.

- */ - boolean wasSuccessful() throws IOException { - if (ioexception != null) { - throw ioexception; - } - return success; - } - - /** - * Closes streams, interrupts the download, may delete the - * output file. - */ - void closeStreams() throws IOException { - interrupt(); - if (success) { - IOUtils.close(is, os); - } else { - IOUtils.closeWhileHandlingException(is, os); - if (dest != null && Files.exists(dest)) { - IOUtils.deleteFilesIgnoringExceptions(dest); - } - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java index 08761f84ff5..ab65f090364 100644 --- a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java +++ b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java @@ -52,33 +52,6 @@ public final class FileSystemUtils { private FileSystemUtils() {} // only static methods - /** - * Returns true iff a file under the given root has one of the given extensions. This method - * will travers directories recursively and will terminate once any of the extensions was found. This - * methods will not follow any links. - * - * @param root the root directory to travers. Must be a directory - * @param extensions the file extensions to look for - * @return true iff a file under the given root has one of the given extensions, otherwise false - * @throws IOException if an IOException occurs or if the given root path is not a directory. - */ - public static boolean hasExtensions(Path root, final String... extensions) throws IOException { - final AtomicBoolean retVal = new AtomicBoolean(false); - Files.walkFileTree(root, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - for (String extension : extensions) { - if (file.getFileName().toString().endsWith(extension)) { - retVal.set(true); - return FileVisitResult.TERMINATE; - } - } - return super.visitFile(file, attrs); - } - }); - return retVal.get(); - } - /** * Returns true iff one of the files exists otherwise false */ @@ -168,167 +141,6 @@ public final class FileSystemUtils { return new BufferedReader(reader); } - /** - * This utility copy a full directory content (excluded) under - * a new directory but without overwriting existing files. - * - * When a file already exists in destination dir, the source file is copied under - * destination directory but with a suffix appended if set or source file is ignored - * if suffix is not set (null). - * @param source Source directory (for example /tmp/es/src) - * @param destination Destination directory (destination directory /tmp/es/dst) - * @param suffix When not null, files are copied with a suffix appended to the original name (eg: ".new") - * When null, files are ignored - */ - public static void moveFilesWithoutOverwriting(Path source, final Path destination, final String suffix) throws IOException { - - // Create destination dir - Files.createDirectories(destination); - - final int configPathRootLevel = source.getNameCount(); - - // We walk through the file tree from - Files.walkFileTree(source, new SimpleFileVisitor() { - private Path buildPath(Path path) { - return destination.resolve(path); - } - - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { - // We are now in dir. 
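[editor's note] The removed hasExtensions() helper above is a standard early-terminating walkFileTree search; on Java 8 the same check collapses to a short stream pipeline. A sketch of that equivalent (not the replacement this PR actually ships):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.stream.Stream;

    final class ExtensionScan {
        /** True if any regular file under root ends with one of the extensions. */
        static boolean hasExtensions(Path root, String... extensions) throws IOException {
            try (Stream<Path> files = Files.walk(root)) {   // walk() does not follow links by default
                return files.filter(Files::isRegularFile)
                            .map(p -> p.getFileName().toString())
                            .anyMatch(name -> {             // anyMatch short-circuits, like TERMINATE
                                for (String extension : extensions) {
                                    if (name.endsWith(extension)) {
                                        return true;
                                    }
                                }
                                return false;
                            });
            }
        }
    }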
We need to remove root of config files to have a relative path - - // If we are not walking in root dir, we might be able to copy its content - // if it does not already exist - if (configPathRootLevel != dir.getNameCount()) { - Path subpath = dir.subpath(configPathRootLevel, dir.getNameCount()); - Path path = buildPath(subpath); - if (!Files.exists(path)) { - // We just move the structure to new dir - // we can't do atomic move here since src / dest might be on different mounts? - move(dir, path); - // We just ignore sub files from here - return FileVisitResult.SKIP_SUBTREE; - } - } - - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - Path subpath = null; - - if (configPathRootLevel != file.getNameCount()) { - subpath = file.subpath(configPathRootLevel, file.getNameCount()); - } - Path path = buildPath(subpath); - - if (!Files.exists(path)) { - // We just move the new file to new dir - move(file, path); - } else if (suffix != null) { - if (!isSameFile(file, path)) { - // If it already exists we try to copy this new version appending suffix to its name - path = path.resolveSibling(path.getFileName().toString().concat(suffix)); - // We just move the file to new dir but with a new name (appended with suffix) - Files.move(file, path, StandardCopyOption.REPLACE_EXISTING); - } - } - - return FileVisitResult.CONTINUE; - } - - /** - * Compares the content of two paths by comparing them - */ - private boolean isSameFile(Path first, Path second) throws IOException { - // do quick file size comparison before hashing - boolean sameFileSize = Files.size(first) == Files.size(second); - if (!sameFileSize) { - return false; - } - - byte[] firstBytes = Files.readAllBytes(first); - byte[] secondBytes = Files.readAllBytes(second); - return Arrays.equals(firstBytes, secondBytes); - } - }); - } - - /** - * Copy recursively a dir to a new location - * @param source source dir - * @param destination destination dir - */ - public static void copyDirectoryRecursively(Path source, Path destination) throws IOException { - Files.walkFileTree(source, new TreeCopier(source, destination, false)); - } - - /** - * Move or rename a file to a target file. This method supports moving a file from - * different filesystems (not supported by Files.move()). - * - * @param source source file - * @param destination destination file - */ - public static void move(Path source, Path destination) throws IOException { - try { - // We can't use atomic move here since source & target can be on different filesystems. - Files.move(source, destination); - } catch (DirectoryNotEmptyException e) { - Files.walkFileTree(source, new TreeCopier(source, destination, true)); - } - } - - // TODO: note that this will fail if source and target are on different NIO.2 filesystems. 
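[editor's note] moveFilesWithoutOverwriting() is easiest to understand from its effect on a concrete tree. A usage sketch for a config-upgrade scenario (paths hypothetical):

    // Before:
    //   /tmp/es/src/elasticsearch.yml      (new default shipped with the upgrade)
    //   /tmp/es/dst/elasticsearch.yml      (user-edited file, must not be clobbered)
    //   /tmp/es/src/scripts/calc.groovy    (no counterpart under dst)
    FileSystemUtils.moveFilesWithoutOverwriting(
            Paths.get("/tmp/es/src"), Paths.get("/tmp/es/dst"), ".new");
    // After:
    //   /tmp/es/dst/elasticsearch.yml      (untouched)
    //   /tmp/es/dst/elasticsearch.yml.new  (the shipped default, suffixed because dst
    //                                       existed and the contents differed)
    //   /tmp/es/dst/scripts/calc.groovy    (moved wholesale; scripts/ was absent under dst)

If the two files compare equal (isSameFile checks size, then bytes), nothing is written; with a null suffix, differing files are silently skipped.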
- - static class TreeCopier extends SimpleFileVisitor { - private final Path source; - private final Path target; - private final boolean delete; - - TreeCopier(Path source, Path target, boolean delete) { - this.source = source; - this.target = target; - this.delete = delete; - } - - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) { - Path newDir = target.resolve(source.relativize(dir)); - try { - Files.copy(dir, newDir); - } catch (FileAlreadyExistsException x) { - // We ignore this - } catch (IOException x) { - return SKIP_SUBTREE; - } - return CONTINUE; - } - - @Override - public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { - if (delete) { - IOUtils.rm(dir); - } - return CONTINUE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - Path newFile = target.resolve(source.relativize(file)); - try { - Files.copy(file, newFile); - if (delete) { - Files.deleteIfExists(file); - } - } catch (IOException x) { - // We ignore this - } - return CONTINUE; - } - } - /** * Returns an array of all files in the given directory matching. */ diff --git a/core/src/main/java/org/elasticsearch/common/logging/log4j/TerminalAppender.java b/core/src/main/java/org/elasticsearch/common/logging/log4j/TerminalAppender.java index 3c60c44d3e3..6e626060542 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/log4j/TerminalAppender.java +++ b/core/src/main/java/org/elasticsearch/common/logging/log4j/TerminalAppender.java @@ -25,7 +25,7 @@ import org.apache.log4j.spi.LoggingEvent; import org.elasticsearch.common.cli.Terminal; /** - * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginManagerCliParser. + * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginCli. 
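[editor's note] For context on the class continued just below: a log4j 1.x AppenderSkeleton subclass like TerminalAppender has only three obligations. A minimal hedged sketch, writing to stdout instead of the ES Terminal:

    import org.apache.log4j.AppenderSkeleton;
    import org.apache.log4j.spi.LoggingEvent;

    public class StdoutAppender extends AppenderSkeleton {
        @Override
        protected void append(LoggingEvent event) {
            // getRenderedMessage() resolves the message object to a string
            System.out.println(event.getRenderedMessage());
        }

        @Override
        public void close() {
            // nothing to release for stdout
        }

        @Override
        public boolean requiresLayout() {
            return false;   // we print the raw rendered message
        }
    }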
* */ public class TerminalAppender extends AppenderSkeleton { @Override diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 593d586ba96..616a7328ecc 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.ClusterModule; @@ -56,7 +57,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.PrimaryShardAllocator; -import org.elasticsearch.http.netty.NettyHttpServerTransport; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.analysis.HunspellService; @@ -112,9 +113,9 @@ public final class ClusterSettings extends AbstractScopedSettings { @Override public boolean hasChanged(Settings current, Settings previous) { return current.filter(loggerPredicate).getAsMap().equals(previous.filter(loggerPredicate).getAsMap()) == false; - } + } - @Override + @Override public Settings getValue(Settings current, Settings previous) { Settings.Builder builder = Settings.builder(); builder.put(current.filter(loggerPredicate).getAsMap()); @@ -130,7 +131,7 @@ public final class ClusterSettings extends AbstractScopedSettings { return builder.build(); } - @Override + @Override public void apply(Settings value, Settings current, Settings previous) { for (String key : value.getAsMap().keySet()) { assert loggerPredicate.test(key); @@ -141,90 +142,103 @@ public final class ClusterSettings extends AbstractScopedSettings { } else { ESLoggerFactory.getLogger(component).setLevel(value.get(key)); } - } - } + } + } }; public static Set> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>( Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, - TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client - TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT, - TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME, - AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, - BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, - BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, - BalancedShardsAllocator.THRESHOLD_SETTING, - ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, - ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, - EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, - EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, - ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING, - 
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, - FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING, - FsRepository.REPOSITORIES_COMPRESS_SETTING, - FsRepository.REPOSITORIES_LOCATION_SETTING, - IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, - IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, + TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client + TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT, + TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME, + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.THRESHOLD_SETTING, + ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, + ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, + FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING, + FsRepository.REPOSITORIES_COMPRESS_SETTING, + FsRepository.REPOSITORIES_LOCATION_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, - IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, - MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, - MetaData.SETTING_READ_ONLY_SETTING, - RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, - RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, - RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, - RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, - ThreadPool.THREADPOOL_GROUP_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, - SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, - 
DestructiveOperations.REQUIRES_NAME_SETTING, - DiscoverySettings.PUBLISH_TIMEOUT_SETTING, - DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, - DiscoverySettings.COMMIT_TIMEOUT_SETTING, - DiscoverySettings.NO_MASTER_BLOCK_SETTING, - GatewayService.EXPECTED_DATA_NODES_SETTING, - GatewayService.EXPECTED_MASTER_NODES_SETTING, - GatewayService.EXPECTED_NODES_SETTING, - GatewayService.RECOVER_AFTER_DATA_NODES_SETTING, - GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING, - GatewayService.RECOVER_AFTER_NODES_SETTING, - GatewayService.RECOVER_AFTER_TIME_SETTING, - NetworkModule.HTTP_ENABLED, - NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS, - NettyHttpServerTransport.SETTING_CORS_ENABLED, - NettyHttpServerTransport.SETTING_CORS_MAX_AGE, - NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED, - NettyHttpServerTransport.SETTING_PIPELINING, - HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, - ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, - TransportService.TRACE_LOG_EXCLUDE_SETTING, - TransportService.TRACE_LOG_INCLUDE_SETTING, - TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, - ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, - InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, - Transport.TRANSPORT_TCP_COMPRESS, + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, + MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, + MetaData.SETTING_READ_ONLY_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, + ThreadPool.THREADPOOL_GROUP_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, + SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, + 
DestructiveOperations.REQUIRES_NAME_SETTING, + DiscoverySettings.PUBLISH_TIMEOUT_SETTING, + DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, + DiscoverySettings.COMMIT_TIMEOUT_SETTING, + DiscoverySettings.NO_MASTER_BLOCK_SETTING, + GatewayService.EXPECTED_DATA_NODES_SETTING, + GatewayService.EXPECTED_MASTER_NODES_SETTING, + GatewayService.EXPECTED_NODES_SETTING, + GatewayService.RECOVER_AFTER_DATA_NODES_SETTING, + GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING, + GatewayService.RECOVER_AFTER_NODES_SETTING, + GatewayService.RECOVER_AFTER_TIME_SETTING, + NetworkModule.HTTP_ENABLED, + HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS, + HttpTransportSettings.SETTING_CORS_ENABLED, + HttpTransportSettings.SETTING_CORS_MAX_AGE, + HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, + HttpTransportSettings.SETTING_PIPELINING, + HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN, + HttpTransportSettings.SETTING_HTTP_PORT, + HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT, + HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS, + HttpTransportSettings.SETTING_HTTP_COMPRESSION, + HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL, + HttpTransportSettings.SETTING_CORS_ALLOW_METHODS, + HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS, + HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, + HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH, + HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE, + HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE, + HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH, + HttpTransportSettings.SETTING_HTTP_RESET_COOKIES, + HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, + TransportService.TRACE_LOG_EXCLUDE_SETTING, + TransportService.TRACE_LOG_INCLUDE_SETTING, + TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, + InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, + Transport.TRANSPORT_TCP_COMPRESS, TransportSettings.TRANSPORT_PROFILES_SETTING, TransportSettings.HOST, TransportSettings.PUBLISH_HOST, @@ -253,72 +267,72 @@ public final class ClusterSettings extends AbstractScopedSettings { NettyTransport.TCP_SEND_BUFFER_SIZE, NettyTransport.TCP_RECEIVE_BUFFER_SIZE, NettyTransport.TCP_BLOCKING_SERVER, - NetworkService.GLOBAL_NETWORK_HOST_SETTING, - NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, - NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, - NetworkService.TcpSettings.TCP_NO_DELAY, - NetworkService.TcpSettings.TCP_KEEP_ALIVE, - NetworkService.TcpSettings.TCP_REUSE_ADDRESS, - NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, - NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, - NetworkService.TcpSettings.TCP_BLOCKING, - NetworkService.TcpSettings.TCP_BLOCKING_SERVER, - NetworkService.TcpSettings.TCP_BLOCKING_CLIENT, - NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT, - 
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, - IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, - PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING, - ScriptService.SCRIPT_CACHE_SIZE_SETTING, - IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING, - IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, - IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, - IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, + NetworkService.GLOBAL_NETWORK_HOST_SETTING, + NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, + NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, + NetworkService.TcpSettings.TCP_NO_DELAY, + NetworkService.TcpSettings.TCP_KEEP_ALIVE, + NetworkService.TcpSettings.TCP_REUSE_ADDRESS, + NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, + NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, + NetworkService.TcpSettings.TCP_BLOCKING, + NetworkService.TcpSettings.TCP_BLOCKING_SERVER, + NetworkService.TcpSettings.TCP_BLOCKING_CLIENT, + NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT, + IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, + IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING, + ScriptService.SCRIPT_CACHE_SIZE_SETTING, + IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING, + IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, + IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, + IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, - HunspellService.HUNSPELL_LAZY_LOAD, - HunspellService.HUNSPELL_IGNORE_CASE, - HunspellService.HUNSPELL_DICTIONARY_OPTIONS, - IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, - Environment.PATH_CONF_SETTING, - Environment.PATH_DATA_SETTING, - Environment.PATH_HOME_SETTING, - Environment.PATH_LOGS_SETTING, - Environment.PATH_PLUGINS_SETTING, - Environment.PATH_REPO_SETTING, - Environment.PATH_SCRIPTS_SETTING, - Environment.PATH_SHARED_DATA_SETTING, - Environment.PIDFILE_SETTING, - DiscoveryService.DISCOVERY_SEED_SETTING, - DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING, - DiscoveryModule.DISCOVERY_TYPE_SETTING, - DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, - FaultDetection.PING_RETRIES_SETTING, - FaultDetection.PING_TIMEOUT_SETTING, - FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING, - FaultDetection.PING_INTERVAL_SETTING, - FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING, - ZenDiscovery.PING_TIMEOUT_SETTING, - ZenDiscovery.JOIN_TIMEOUT_SETTING, - ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING, - ZenDiscovery.JOIN_RETRY_DELAY_SETTING, - ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING, - ZenDiscovery.SEND_LEAVE_REQUEST_SETTING, - ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING, - ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING, - ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING, - UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, - UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, - SearchService.DEFAULT_KEEPALIVE_SETTING, - SearchService.KEEPALIVE_INTERVAL_SETTING, - Node.WRITE_PORTS_FIELD_SETTING, + HunspellService.HUNSPELL_LAZY_LOAD, + HunspellService.HUNSPELL_IGNORE_CASE, + HunspellService.HUNSPELL_DICTIONARY_OPTIONS, + IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, + Environment.PATH_CONF_SETTING, + Environment.PATH_DATA_SETTING, + Environment.PATH_HOME_SETTING, + Environment.PATH_LOGS_SETTING, + Environment.PATH_PLUGINS_SETTING, + Environment.PATH_REPO_SETTING, + Environment.PATH_SCRIPTS_SETTING, + Environment.PATH_SHARED_DATA_SETTING, + Environment.PIDFILE_SETTING, + 
DiscoveryService.DISCOVERY_SEED_SETTING, + DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING, + DiscoveryModule.DISCOVERY_TYPE_SETTING, + DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, + FaultDetection.PING_RETRIES_SETTING, + FaultDetection.PING_TIMEOUT_SETTING, + FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING, + FaultDetection.PING_INTERVAL_SETTING, + FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING, + ZenDiscovery.PING_TIMEOUT_SETTING, + ZenDiscovery.JOIN_TIMEOUT_SETTING, + ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING, + ZenDiscovery.JOIN_RETRY_DELAY_SETTING, + ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING, + ZenDiscovery.SEND_LEAVE_REQUEST_SETTING, + ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING, + ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING, + ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING, + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, + SearchService.DEFAULT_KEEPALIVE_SETTING, + SearchService.KEEPALIVE_INTERVAL_SETTING, + Node.WRITE_PORTS_FIELD_SETTING, Node.NODE_CLIENT_SETTING, Node.NODE_DATA_SETTING, Node.NODE_MASTER_SETTING, Node.NODE_LOCAL_SETTING, Node.NODE_MODE_SETTING, Node.NODE_INGEST_SETTING, - URLRepository.ALLOWED_URLS_SETTING, - URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING, - URLRepository.REPOSITORIES_URL_SETTING, + URLRepository.ALLOWED_URLS_SETTING, + URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING, + URLRepository.REPOSITORIES_URL_SETTING, URLRepository.SUPPORTED_PROTOCOLS_SETTING, TransportMasterNodeReadAction.FORCE_LOCAL_SETTING, AutoCreateIndex.AUTO_CREATE_INDEX_SETTING, @@ -346,6 +360,12 @@ public final class ClusterSettings extends AbstractScopedSettings { FsService.REFRESH_INTERVAL_SETTING, JvmGcMonitorService.ENABLED_SETTING, JvmGcMonitorService.REFRESH_INTERVAL_SETTING, - JvmGcMonitorService.GC_SETTING + JvmGcMonitorService.GC_SETTING, + PageCacheRecycler.LIMIT_HEAP_SETTING, + PageCacheRecycler.WEIGHT_BYTES_SETTING, + PageCacheRecycler.WEIGHT_INT_SETTING, + PageCacheRecycler.WEIGHT_LONG_SETTING, + PageCacheRecycler.WEIGHT_OBJECTS_SETTING, + PageCacheRecycler.TYPE_SETTING ))); } diff --git a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java index e1e4571eda4..4f5a3966d43 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java +++ b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java @@ -35,6 +35,10 @@ public class PortsRange { this.portRange = portRange; } + public String getPortRangeString() { + return portRange; + } + public int[] ports() throws NumberFormatException { final IntArrayList ports = new IntArrayList(); iterate(new PortCallback() { diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainer.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainer.java new file mode 100644 index 00000000000..2afb78591dd --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainer.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
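[editor's note] Everything registered in BUILT_IN_CLUSTER_SETTINGS above is a typed Setting; the factory signatures visible throughout this diff take (key, default, dynamic, scope). A hedged sketch of defining and reading one (key names hypothetical):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Scope;
    import org.elasticsearch.common.settings.Settings;

    public final class MySettings {
        // (key, default value, dynamically updatable?, scope) -- the shape used above
        public static final Setting<Boolean> MY_FLAG =
                Setting.boolSetting("my_plugin.flag", false, false, Scope.CLUSTER);
        public static final Setting<Integer> MY_LIMIT =
                Setting.intSetting("my_plugin.limit", 100, false, Scope.CLUSTER);

        static void demo() {
            Settings settings = Settings.builder().put("my_plugin.limit", 250).build();
            int limit = MY_LIMIT.get(settings);      // typed read of the configured value
            boolean flag = MY_FLAG.get(settings);    // key absent -> declared default, false
            assert limit == 250 && flag == false;
        }
    }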
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.common.lease.Releasable; + +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Container that represents a resource with reference counting capabilities. Provides operations to suspend acquisition of new references. + * This is useful for resource management when resources are intermittently unavailable. + * + * Assumes less than Integer.MAX_VALUE references are concurrently being held at one point in time. + */ +public final class SuspendableRefContainer { + private static final int TOTAL_PERMITS = Integer.MAX_VALUE; + private final Semaphore semaphore; + + public SuspendableRefContainer() { + // fair semaphore to ensure that blockAcquisition() does not starve under thread contention + this.semaphore = new Semaphore(TOTAL_PERMITS, true); + } + + /** + * Tries acquiring a reference. Returns reference holder if reference acquisition is not blocked at the time of invocation (see + * {@link #blockAcquisition()}). Returns null if reference acquisition is blocked at the time of invocation. + * + * @return reference holder if reference acquisition is not blocked, null otherwise + * @throws InterruptedException if the current thread is interrupted + */ + public Releasable tryAcquire() throws InterruptedException { + if (semaphore.tryAcquire(1, 0, TimeUnit.SECONDS)) { // the untimed tryAcquire methods do not honor the fairness setting + return idempotentRelease(1); + } else { + return null; + } + } + + /** + * Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation. + * + * @return reference holder + * @throws InterruptedException if the current thread is interrupted + */ + public Releasable acquire() throws InterruptedException { + semaphore.acquire(); + return idempotentRelease(1); + } + + /** + * Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation. + * + * @return reference holder + */ + public Releasable acquireUninterruptibly() { + semaphore.acquireUninterruptibly(); + return idempotentRelease(1); + } + + /** + * Disables reference acquisition and waits until all existing references are released. + * When released, reference acquisition is enabled again. + * This guarantees that between successful acquisition and release, no one is holding a reference. + * + * @return references holder to all references + */ + public Releasable blockAcquisition() { + semaphore.acquireUninterruptibly(TOTAL_PERMITS); + return idempotentRelease(TOTAL_PERMITS); + } + + /** + * Helper method that ensures permits are only released once + * + * @return reference holder + */ + private Releasable idempotentRelease(int permits) { + AtomicBoolean closed = new AtomicBoolean(); + return () -> { + if (closed.compareAndSet(false, true)) { + semaphore.release(permits); + } + }; + } + + /** + * Returns the number of references currently being held. 
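[editor's note] The container is designed for try-with-resources (Releasable is Closeable in this codebase, and the returned holders release exactly once). A hedged usage sketch of the acquire/suspend dance:

    import org.elasticsearch.common.lease.Releasable;
    import org.elasticsearch.common.util.concurrent.SuspendableRefContainer;

    final class RelocationDemo {
        private final SuspendableRefContainer operationLock = new SuspendableRefContainer();

        void indexOperation() throws InterruptedException {
            // every in-flight operation holds a reference for its duration
            try (Releasable ignored = operationLock.acquire()) {
                // ... perform the write ...
            }
        }

        void handoffPrimary() {
            // blocks new acquisitions and waits for in-flight operations to drain;
            // while the returned holder is open, no other reference can be held
            try (Releasable block = operationLock.blockAcquisition()) {
                // ... hand off with no concurrent operations in flight ...
            }
        }
    }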
+ */ + public int activeRefs() { + int availablePermits = semaphore.availablePermits(); + if (availablePermits == 0) { + // when blockAcquisition is holding all permits + return 0; + } else { + return TOTAL_PERMITS - availablePermits; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 55eaf78b7a2..ffa29c857ea 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -89,17 +89,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider { - public final static Setting REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER); public final static Setting PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER); public final static Setting JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout", - settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); + settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); public final static Setting JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER); public final static Setting JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER); public final static Setting MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER); public final static Setting SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER); public final static Setting MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER); public final static Setting MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout", - settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); + settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); public final static Setting MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER); public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin"; @@ -142,8 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen private final AtomicBoolean initialStateSent = new AtomicBoolean(); - private volatile boolean rejoinOnMasterGone; - /** counts the time this node has joined the cluster or have elected it self as master */ private final AtomicLong clusterJoinsCounter = new AtomicLong(); @@ -177,7 +174,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen 
this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings); this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings); this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings); - this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings); logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); @@ -188,7 +184,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]"); } }); - clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone); this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService); this.masterFD.addListener(new MasterNodeFailureListener()); @@ -323,10 +318,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen return clusterJoinsCounter.get() > 0; } - private void setRejoingOnMasterGone(boolean rejoin) { - this.rejoinOnMasterGone = rejoin; - } - /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ @@ -670,35 +661,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // flush any pending cluster states from old master, so it will not be set as master again publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason)); - if (rejoinOnMasterGone) { - return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")"); - } - - if (!electMaster.hasEnoughMasterNodes(discoveryNodes)) { - return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "not enough master nodes after master left (reason = " + reason + ")"); - } - - final DiscoveryNode electedMaster = electMaster.electMaster(discoveryNodes); // elect master - final DiscoveryNode localNode = currentState.nodes().localNode(); - if (localNode.equals(electedMaster)) { - masterFD.stop("got elected as new master since master left (reason = " + reason + ")"); - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(localNode.id()).build(); - ClusterState newState = ClusterState.builder(currentState).nodes(discoveryNodes).build(); - nodesFD.updateNodesAndPing(newState); - return newState; - - } else { - nodesFD.stop(); - if (electedMaster != null) { - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(electedMaster.id()).build(); - masterFD.restart(electedMaster, "possible elected master since master left (reason = " + reason + ")"); - return ClusterState.builder(currentState) - .nodes(discoveryNodes) - .build(); - } else { - return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master_left and no other node elected to become master"); - } - } + return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")"); } @Override @@ -857,7 +820,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // Sanity check: maybe we don't end up here, because serialization may have failed. 
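[editor's note] The addSettingsUpdateConsumer call removed here (and the minimum_master_nodes consumer kept just above it) shows the dynamic-settings idiom: the consumer both validates and applies, and can veto an update by throwing. A hedged fragment against that same API, with a hypothetical setting and field:

    // The consumer runs on every cluster-settings update touching the key;
    // throwing rejects the update before it takes effect.
    clusterSettings.addSettingsUpdateConsumer(MY_TIMEOUT_SETTING, newTimeout -> {
        if (newTimeout.millis() < 0) {
            throw new IllegalArgumentException(
                    "cannot set " + MY_TIMEOUT_SETTING.getKey() + " to a negative value");
        }
        this.timeout = newTimeout;   // apply only after validation passed
    });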
if (node.getVersion().before(minimumNodeJoinVersion)) { callback.onFailure( - new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") + new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") ); return; } @@ -1109,10 +1072,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } - boolean isRejoinOnMasterGone() { - return rejoinOnMasterGone; - } - public static class RejoinClusterRequest extends TransportRequest { private String fromNodeId; diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java index 65d62bd9e33..2fc85099e8b 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -330,31 +330,4 @@ public class Environment { public static FileStore getFileStore(Path path) throws IOException { return ESFileStore.getMatchingFileStore(path, fileStores); } - - /** - * Returns true if the path is writable. - * Acts just like {@link Files#isWritable(Path)}, except won't - * falsely return false for paths on SUBST'd drive letters - * See https://bugs.openjdk.java.net/browse/JDK-8034057 - * Note this will set the file modification time (to its already-set value) - * to test access. - */ - @SuppressForbidden(reason = "works around https://bugs.openjdk.java.net/browse/JDK-8034057") - public static boolean isWritable(Path path) throws IOException { - boolean v = Files.isWritable(path); - if (v || Constants.WINDOWS == false) { - return v; - } - - // isWritable returned false on windows, the hack begins!!!!!! - // resetting the modification time is the least destructive/simplest - // way to check for both files and directories, and fails early just - // in getting the current value if file doesn't exist, etc - try { - Files.setLastModifiedTime(path, Files.getLastModifiedTime(path)); - return true; - } catch (Throwable e) { - return false; - } - } } diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java new file mode 100644 index 00000000000..c5a1844f7ff --- /dev/null +++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.transport.PortsRange; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; + +public final class HttpTransportSettings { + + public static final Setting SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER); + public static final Setting SETTING_CORS_ALLOW_ORIGIN = new Setting("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER); + public static final Setting SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER); + public static final Setting SETTING_CORS_ALLOW_METHODS = new Setting("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER); + public static final Setting SETTING_CORS_ALLOW_HEADERS = new Setting("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER); + public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER); + public static final Setting SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER); + public static final Setting SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_PORT = new Setting("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER) ; + public static final Setting SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; + public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; + public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER) ; + // don't reset cookies by default, since I don't think we really need to + // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies + public static final Setting SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER); + + private HttpTransportSettings() { + } +} diff --git a/core/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java b/core/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java index 71d63d8d1dc..17e14fe83f1 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java +++ 
b/core/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java @@ -20,6 +20,7 @@ package org.elasticsearch.http.netty; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent; import org.elasticsearch.rest.support.RestUtils; import org.jboss.netty.channel.ChannelHandler; @@ -46,7 +47,8 @@ public class HttpRequestHandler extends SimpleChannelUpstreamHandler { public HttpRequestHandler(NettyHttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) { this.serverTransport = serverTransport; - this.corsPattern = RestUtils.checkCorsSettingForRegex(serverTransport.settings().get(NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN)); + this.corsPattern = RestUtils + .checkCorsSettingForRegex(HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN.get(serverTransport.settings())); this.httpPipeliningEnabled = serverTransport.pipelining; this.detailedErrorsEnabled = detailedErrorsEnabled; this.threadContext = threadContext; diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java index 316799dd062..1d3a2966e34 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java @@ -49,12 +49,12 @@ import java.util.Map; import java.util.Set; import java.util.regex.Pattern; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_HEADERS; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_METHODS; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ENABLED; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_MAX_AGE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS; import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS; import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS; @@ -117,7 +117,7 @@ public class NettyHttpChannel extends HttpChannel { String originHeader = request.header(ORIGIN); if (!Strings.isNullOrEmpty(originHeader)) { if (corsPattern == null) { - String allowedOrigins = transport.settings().get(SETTING_CORS_ALLOW_ORIGIN, null); + String allowedOrigins = SETTING_CORS_ALLOW_ORIGIN.get(transport.settings()); if (!Strings.isNullOrEmpty(allowedOrigins)) { resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins); } @@ -128,8 +128,8 @@ public class NettyHttpChannel extends HttpChannel { if (nettyRequest.getMethod() == HttpMethod.OPTIONS) { // Allow Ajax requests based 
on the CORS "preflight" request resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings())); - resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, transport.settings().get(SETTING_CORS_ALLOW_METHODS, "OPTIONS, HEAD, GET, POST, PUT, DELETE")); - resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, transport.settings().get(SETTING_CORS_ALLOW_HEADERS, "X-Requested-With, Content-Type, Content-Length")); + resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, SETTING_CORS_ALLOW_METHODS.get(transport.settings())); + resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, SETTING_CORS_ALLOW_HEADERS.get(transport.settings())); } if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) { diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index 00b3c0f8afa..83e6823f6f0 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -26,8 +26,6 @@ import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.netty.OpenChannelsHandler; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -46,6 +44,7 @@ import org.elasticsearch.http.HttpRequest; import org.elasticsearch.http.HttpServerAdapter; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -75,7 +74,6 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReference; - import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING; import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE; import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY; @@ -93,22 +91,6 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER); - public static final String SETTING_CORS_ALLOW_ORIGIN = "http.cors.allow-origin"; - public static final Setting SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER); - public static final String SETTING_CORS_ALLOW_METHODS = "http.cors.allow-methods"; - public static final String SETTING_CORS_ALLOW_HEADERS = "http.cors.allow-headers"; - public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER); - - public static final Setting SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER); - public static final String SETTING_PIPELINING_MAX_EVENTS = "http.pipelining.max_events"; - public static final String SETTING_HTTP_COMPRESSION = "http.compression"; - public static final String SETTING_HTTP_COMPRESSION_LEVEL = "http.compression_level"; - public 
static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER); - - public static final int DEFAULT_SETTING_PIPELINING_MAX_EVENTS = 10000; - public static final String DEFAULT_PORT_RANGE = "9200-9300"; - protected final NetworkService networkService; protected final BigArrays bigArrays; @@ -131,7 +113,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent 0) { @@ -215,10 +194,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent Integer.MAX_VALUE) { @@ -312,10 +291,9 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = portsRange.iterate(new PortsRange.PortCallback() { + boolean success = port.iterate(new PortsRange.PortCallback() { @Override public boolean onPortNumber(int portNumber) { try { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 03c7e4e82e1..46764eaed92 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -42,17 +42,17 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; -import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.SuspendableRefContainer; import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -189,9 +189,17 @@ public class IndexShard extends AbstractIndexShardComponent { private final ShardPath path; - private final IndexShardOperationCounter indexShardOperationCounter; + private final SuspendableRefContainer suspendableRefContainer; - private final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); + private static final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); + // for primaries, we only allow to write when actually started (so the cluster has decided we started) + // in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be + // in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target + // which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries). 
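As a compilable aside, the gating these comments describe can be sketched as follows; the state names and set contents are taken from the declarations just below in this hunk, while the enums and the check method are simplified stand-ins rather than the real IndexShard API:

    import java.util.EnumSet;

    public class WriteGateSketch {
        enum IndexShardState { CREATED, RECOVERING, POST_RECOVERY, STARTED, RELOCATED, CLOSED }
        enum Origin { PRIMARY, REPLICA, RECOVERY }

        // primaries: never once RELOCATED, which prevents two active primaries
        static final EnumSet<IndexShardState> WRITE_ALLOWED_FOR_PRIMARY =
                EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
        // replicas: additionally allowed while RELOCATED, since a not-yet-active relocation
        // target may still be syncing changes back to the relocation source
        static final EnumSet<IndexShardState> WRITE_ALLOWED_FOR_REPLICA =
                EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);

        static void verifyWriteAllowed(Origin origin, IndexShardState state) {
            final EnumSet<IndexShardState> allowed;
            if (origin == Origin.PRIMARY) {
                allowed = WRITE_ALLOWED_FOR_PRIMARY;
            } else if (origin == Origin.RECOVERY) {
                allowed = EnumSet.of(IndexShardState.RECOVERING); // recovery ops only while actually recovering
            } else {
                allowed = WRITE_ALLOWED_FOR_REPLICA;
            }
            if (allowed.contains(state) == false) {
                throw new IllegalStateException("operation only allowed when shard state is one of " + allowed + ", origin [" + origin + "]");
            }
        }

        public static void main(String[] args) {
            verifyWriteAllowed(Origin.REPLICA, IndexShardState.RELOCATED); // passes
            try {
                verifyWriteAllowed(Origin.PRIMARY, IndexShardState.RELOCATED); // rejected
            } catch (IllegalStateException e) {
                System.out.println(e.getMessage());
            }
        }
    }
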
+ public static final EnumSet writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); + // replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure it's consistent + // a relocated shard can also be the target of a replication if the relocation target has not been marked as active yet and is syncing its changes back to the relocation source + private static final EnumSet writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); private final IndexSearcherWrapper searcherWrapper; @@ -250,7 +258,7 @@ public class IndexShard extends AbstractIndexShardComponent { } this.engineConfig = newEngineConfig(translogConfig, cachingPolicy); - this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId); + this.suspendableRefContainer = new SuspendableRefContainer(); this.provider = provider; this.searcherWrapper = indexSearcherWrapper; this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext()); @@ -321,6 +329,8 @@ public class IndexShard extends AbstractIndexShardComponent { * Updates the shards routing entry. This mutate the shards internal state depending * on the changes that get introduced by the new routing value. This method will persist shard level metadata * unless explicitly disabled. + * + * @throws IndexShardRelocatedException if the shard is marked as relocated and the relocation is aborted */ public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) { final ShardRouting currentRouting = this.shardRouting; @@ -368,6 +378,14 @@ public class IndexShard extends AbstractIndexShardComponent { } } } + + if (state == IndexShardState.RELOCATED && + (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) { + // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery + // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two + // active primaries.
+ throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); + } this.shardRouting = newRouting; indexEventListener.shardRoutingChanged(this, currentRouting, newRouting); } finally { @@ -404,12 +422,16 @@ public class IndexShard extends AbstractIndexShardComponent { } public IndexShard relocated(String reason) throws IndexShardNotStartedException { - synchronized (mutex) { - if (state != IndexShardState.STARTED) { - throw new IndexShardNotStartedException(shardId, state); + try (Releasable block = suspendableRefContainer.blockAcquisition()) { + // no shard operation locks are being held here, move state from started to relocated + synchronized (mutex) { + if (state != IndexShardState.STARTED) { + throw new IndexShardNotStartedException(shardId, state); + } + changeState(IndexShardState.RELOCATED, reason); } - changeState(IndexShardState.RELOCATED, reason); } + return this; } @@ -796,7 +818,6 @@ public class IndexShard extends AbstractIndexShardComponent { refreshScheduledFuture = null; } changeState(IndexShardState.CLOSED, reason); - indexShardOperationCounter.decRef(); } finally { final Engine engine = this.currentEngineReference.getAndSet(null); try { @@ -810,7 +831,6 @@ public class IndexShard extends AbstractIndexShardComponent { } } - public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) { refresh("percolator_load_queries"); @@ -967,16 +987,17 @@ public class IndexShard extends AbstractIndexShardComponent { IndexShardState state = this.state; // one time volatile read if (origin == Engine.Operation.Origin.PRIMARY) { - // for primaries, we only allow to write when actually started (so the cluster has decided we started) - // otherwise, we need to retry, we also want to still allow to index if we are relocated in case it fails - if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]"); + if (writeAllowedStatesForPrimary.contains(state) == false) { + throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]"); + } + } else if (origin == Engine.Operation.Origin.RECOVERY) { + if (state != IndexShardState.RECOVERING) { + throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]"); } } else { - // for replicas, we allow to write also while recovering, since we index also during recovery to replicas - // and rely on version checks to make sure its consistent - if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]"); + assert origin == Engine.Operation.Origin.REPLICA; + if (writeAllowedStatesForReplica.contains(state) == false) { + throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]"); } } } @@ -995,7 +1016,7 @@ public class IndexShard extends AbstractIndexShardComponent { private void 
verifyNotClosed(Throwable suppressed) throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read if (state == IndexShardState.CLOSED) { - final IllegalIndexShardStateException exc = new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed"); + final IllegalIndexShardStateException exc = new IndexShardClosedException(shardId, "operation only allowed when not closed"); if (suppressed != null) { exc.addSuppressed(suppressed); } @@ -1390,37 +1411,21 @@ public class IndexShard extends AbstractIndexShardComponent { idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME)); } - private static class IndexShardOperationCounter extends AbstractRefCounted { - final private ESLogger logger; - private final ShardId shardId; - - public IndexShardOperationCounter(ESLogger logger, ShardId shardId) { - super("index-shard-operations-counter"); - this.logger = logger; - this.shardId = shardId; - } - - @Override - protected void closeInternal() { - logger.debug("operations counter reached 0, will not accept any further writes"); - } - - @Override - protected void alreadyClosed() { - throw new IndexShardClosedException(shardId, "could not increment operation counter. shard is closed."); + public Releasable acquirePrimaryOperationLock() { + verifyNotClosed(); + if (shardRouting.primary() == false) { + throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary"); } + return suspendableRefContainer.acquireUninterruptibly(); } - public void incrementOperationCounter() { - indexShardOperationCounter.incRef(); + public Releasable acquireReplicaOperationLock() { + verifyNotClosed(); + return suspendableRefContainer.acquireUninterruptibly(); } - public void decrementOperationCounter() { - indexShardOperationCounter.decRef(); - } - - public int getOperationsCount() { - return Math.max(0, indexShardOperationCounter.refCount() - 1); // refCount is incremented on creation and decremented on close + public int getActiveOperationsCount() { + return suspendableRefContainer.activeRefs(); // one active ref is held per in-flight operation lock } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java index 2d3c48cd4c5..043ad892777 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java @@ -29,10 +29,14 @@ import java.io.IOException; public class IndexShardRelocatedException extends IllegalIndexShardStateException { public IndexShardRelocatedException(ShardId shardId) { - super(shardId, IndexShardState.RELOCATED, "Already relocated"); + this(shardId, "Already relocated"); + } + + public IndexShardRelocatedException(ShardId shardId, String reason) { + super(shardId, IndexShardState.RELOCATED, reason); } public IndexShardRelocatedException(StreamInput in) throws IOException{ super(in); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java index f021cb4c162..3dea5501c62 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -76,7 +76,7 @@ public class ShardId implements
Streamable, Comparable { if (this == o) return true; if (o == null) return false; ShardId shardId1 = (ShardId) o; - return shardId == shardId1.shardId && index.getName().equals(shardId1.index.getName()); + return shardId == shardId1.shardId && index.equals(shardId1.index); } @Override @@ -112,8 +112,12 @@ public class ShardId implements Streamable, Comparable { @Override public int compareTo(ShardId o) { if (o.getId() == shardId) { - return index.getName().compareTo(o.getIndex().getName()); + int compare = index.getName().compareTo(o.getIndex().getName()); + if (compare != 0) { + return compare; + } + return index.getUUID().compareTo(o.getIndex().getUUID()); } return Integer.compare(shardId, o.getId()); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 8c2f23f7081..98bbd5fe000 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -39,13 +39,11 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.IndexService; @@ -93,26 +91,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent, Boolean> seenMappings = ConcurrentCollections.newConcurrentMap(); - // a list of shards that failed during recovery // we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update - private final ConcurrentMap failedShards = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap failedShards = ConcurrentCollections.newConcurrentMap(); private final RestoreService restoreService; private final RepositoriesService repositoriesService; - static class FailedShard { - public final long version; - public final long timestamp; - - FailedShard(long version) { - this.version = version; - this.timestamp = System.currentTimeMillis(); - } - } - private final Object mutex = new Object(); private final FailedShardHandler failedShardHandler = new FailedShardHandler(); @@ -322,7 +306,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent> iterator = failedShards.entrySet().iterator(); - shards: - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - FailedShard failedShard = entry.getValue(); - IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex()); - if (indexRoutingTable != null) { - IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id()); - if (shardRoutingTable != null) { - for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) { - if (localNodeId.equals(shardRouting.currentNodeId())) { - // we have a timeout here just to make sure we don't have 
dangled failed shards for some reason - // its just another safely layer - if (shardRouting.version() == failedShard.version && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) { - // It's the same failed shard - keep it if it hasn't timed out - continue shards; - } else { - // Different version or expired, remove it - break; - } - } - } - } + RoutingTable routingTable = event.state().routingTable(); + for (Iterator> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) { + Map.Entry entry = iterator.next(); + ShardId failedShardId = entry.getKey(); + ShardRouting failedShardRouting = entry.getValue(); + IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex()); + if (indexRoutingTable == null) { + iterator.remove(); + continue; + } + IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id()); + if (shardRoutingTable == null) { + iterator.remove(); + continue; + } + if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) { + iterator.remove(); } - iterator.remove(); } } @@ -561,7 +540,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { try { if (indexShard.recoverFromStore(nodes.localNode())) { - shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from store", SHARD_STATE_ACTION_LISTENER); + shardStateAction.shardStarted(shardRouting, "after recovery from store", SHARD_STATE_ACTION_LISTENER); } } catch (Throwable t) { handleRecoveryFailure(indexService, shardRouting, true, t); @@ -662,7 +642,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { synchronized (mutex) { - failAndRemoveShard(shardRouting, shardFailure.indexUUID, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause); + failAndRemoveShard(shardRouting, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause); } }); } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 90f2cb5073b..6eb7c88a2a4 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -435,7 +435,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL if (indexShard.routingEntry().primary() == false) { throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); } - int opCount = indexShard.getOperationsCount(); + int opCount = indexShard.getActiveOperationsCount(); logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount); return new InFlightOpsResponse(opCount); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java index 4c2d3d7f60b..f24452bf007 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java @@ -61,8 +61,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe private final ClusterService clusterService; - private final OngoingRecoveres ongoingRecoveries = new OngoingRecoveres(); - + private final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries(); @Inject public 
RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService, @@ -107,11 +106,11 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe } if (!targetShardRouting.initializing()) { logger.debug("delaying recovery of {} as it is not listed as initializing on the target node {}. known shards state is [{}]", - request.shardId(), request.targetNode(), targetShardRouting.state()); + request.shardId(), request.targetNode(), targetShardRouting.state()); throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]"); } - logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode(), request.markAsRelocated()); + logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode()); final RecoverySourceHandler handler; if (shard.indexSettings().isOnSharedFilesystem()) { handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger); @@ -134,8 +133,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe } } - - private static final class OngoingRecoveres { + private static final class OngoingRecoveries { private final Map> ongoingRecoveries = new HashMap<>(); synchronized void add(IndexShard shard, RecoverySourceHandler handler) { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 8cbdfca0221..26c288cfbca 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -393,9 +393,11 @@ public class RecoverySourceHandler { } }); - - if (request.markAsRelocated()) { - // TODO what happens if the recovery process fails afterwards, we need to mark this back to started + if (isPrimaryRelocation()) { + /** + * if the recovery process fails after setting the shard state to RELOCATED, both relocation source and + * target are failed (see {@link IndexShard#updateRoutingEntry}). 
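For context on the relocated(...) call below: the IndexShard hunk earlier in this patch moves the shard to RELOCATED only while new operation locks are blocked from being acquired, which is why a failure past this point has to fail both relocation source and target. The SuspendableRefContainer API is only inferred here from its call sites (acquireUninterruptibly, blockAcquisition, activeRefs); a rough approximation of those semantics on top of a plain ReentrantReadWriteLock:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class SuspendableRefContainerSketch {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        private final AtomicInteger refs = new AtomicInteger();

        // one shared "operation lock" per in-flight indexing operation; many can be held at once
        public Runnable acquireUninterruptibly() {
            lock.readLock().lock();
            refs.incrementAndGet();
            return () -> {               // releasable handed back to the caller
                refs.decrementAndGet();
                lock.readLock().unlock();
            };
        }

        // blocks new acquisitions and waits for in-flight operations to drain;
        // the STARTED to RELOCATED state change runs while this is held
        public Runnable blockAcquisition() {
            lock.writeLock().lock();
            return () -> lock.writeLock().unlock();
        }

        public int activeRefs() {
            return refs.get();
        }
    }
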
+ */ try { shard.relocated("to " + request.targetNode()); } catch (IllegalIndexShardStateException e) { @@ -406,7 +408,11 @@ public class RecoverySourceHandler { } stopWatch.stop(); logger.trace("[{}][{}] finalizing recovery to {}: took [{}]", - indexName, shardId, request.targetNode(), stopWatch.totalTime()); + indexName, shardId, request.targetNode(), stopWatch.totalTime()); + } + + protected boolean isPrimaryRelocation() { + return request.recoveryType() == RecoveryState.Type.PRIMARY_RELOCATION; } /** diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 92bfc87218a..d1c41d4b932 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -101,7 +101,7 @@ public class RecoveryState implements ToXContent, Streamable { STORE((byte) 0), SNAPSHOT((byte) 1), REPLICA((byte) 2), - RELOCATION((byte) 3); + PRIMARY_RELOCATION((byte) 3); private static final Type[] TYPES = new Type[Type.values().length]; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 0912a22a0f5..727bd0b6441 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -138,7 +138,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); threadPool.generic().execute(new RecoveryRunner(recoveryId)); - } protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { @@ -178,7 +177,7 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe return; } final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(), - false, metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId()); + metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId()); final AtomicReference responseHolder = new AtomicReference<>(); try { @@ -267,7 +266,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false); return; } - onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true); } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java index 16bd1d46553..8d75c474791 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java @@ -84,8 +84,4 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { return 0; } - private boolean isPrimaryRelocation() { - return request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary(); - } - } diff --git 
a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java index 3a62f4f6352..49dd70a73f7 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java @@ -41,8 +41,6 @@ public class StartRecoveryRequest extends TransportRequest { private DiscoveryNode targetNode; - private boolean markAsRelocated; - private Store.MetadataSnapshot metadataSnapshot; private RecoveryState.Type recoveryType; @@ -56,12 +54,11 @@ public class StartRecoveryRequest extends TransportRequest { * @param sourceNode The node to recover from * @param targetNode The node to recover to */ - public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, boolean markAsRelocated, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) { + public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) { this.recoveryId = recoveryId; this.shardId = shardId; this.sourceNode = sourceNode; this.targetNode = targetNode; - this.markAsRelocated = markAsRelocated; this.recoveryType = recoveryType; this.metadataSnapshot = metadataSnapshot; } @@ -82,10 +79,6 @@ public class StartRecoveryRequest extends TransportRequest { return targetNode; } - public boolean markAsRelocated() { - return markAsRelocated; - } - public RecoveryState.Type recoveryType() { return recoveryType; } @@ -101,7 +94,6 @@ public class StartRecoveryRequest extends TransportRequest { shardId = ShardId.readShardId(in); sourceNode = DiscoveryNode.readNode(in); targetNode = DiscoveryNode.readNode(in); - markAsRelocated = in.readBoolean(); metadataSnapshot = new Store.MetadataSnapshot(in); recoveryType = RecoveryState.Type.fromId(in.readByte()); @@ -114,7 +106,6 @@ public class StartRecoveryRequest extends TransportRequest { shardId.writeTo(out); sourceNode.writeTo(out); targetNode.writeTo(out); - out.writeBoolean(markAsRelocated); metadataSnapshot.writeTo(out); out.writeByte(recoveryType.id()); } diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java index 21128a94b65..e2d68199f43 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ingest.DeletePipelineRequest; @@ -36,10 +37,8 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.ingest.core.Pipeline; -import org.elasticsearch.ingest.core.PipelineFactoryError; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.ingest.core.TemplateService; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; import org.elasticsearch.script.ScriptService; import java.io.Closeable; @@ -104,8 +103,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust for (PipelineConfiguration 
pipeline : ingestMetadata.getPipelines().values()) { try { pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactoryRegistry)); + } catch (ElasticsearchParseException e) { + throw e; } catch (Exception e) { - throw new RuntimeException(e); + throw new ElasticsearchParseException("Error updating pipeline with id [" + pipeline.getId() + "]", e); } } this.pipelines = Collections.unmodifiableMap(pipelines); @@ -154,9 +155,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener listener) { // validates the pipeline and processor configuration before submitting a cluster update task: Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2(); - WritePipelineResponse response = validatePipelineResponse(request.getId(), pipelineConfig); - if (response != null) { - listener.onResponse(response); + try { + factory.create(request.getId(), pipelineConfig, processorFactoryRegistry); + } catch(Exception e) { + listener.onFailure(e); return; } clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), new AckedClusterStateUpdateTask(request, listener) { @@ -234,16 +236,4 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust } return result; } - - WritePipelineResponse validatePipelineResponse(String id, Map config) { - try { - factory.create(id, config, processorFactoryRegistry); - return null; - } catch (ConfigurationPropertyException e) { - return new WritePipelineResponse(new PipelineFactoryError(e)); - } catch (Exception e) { - return new WritePipelineResponse(new PipelineFactoryError(e.getMessage())); - } - } - } diff --git a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java index 699720e18ca..c784ea1c57a 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java @@ -32,7 +32,8 @@ import java.util.Objects; */ public class CompoundProcessor implements Processor { static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message"; - static final String ON_FAILURE_PROCESSOR_FIELD = "on_failure_processor"; + static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type"; + static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag"; private final List processors; private final List onFailureProcessors; @@ -74,24 +75,26 @@ public class CompoundProcessor implements Processor { if (onFailureProcessors.isEmpty()) { throw e; } else { - executeOnFailure(ingestDocument, e, processor.getType()); + executeOnFailure(ingestDocument, e, processor.getType(), processor.getTag()); } break; } } } - void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType) throws Exception { + void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) throws Exception { Map ingestMetadata = ingestDocument.getIngestMetadata(); try { ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage()); - ingestMetadata.put(ON_FAILURE_PROCESSOR_FIELD, failedProcessorType); + ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType); + ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag); for (Processor processor : onFailureProcessors) { 
processor.execute(ingestDocument); } } finally { ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD); - ingestMetadata.remove(ON_FAILURE_PROCESSOR_FIELD); + ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD); + ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD); } } } diff --git a/core/src/main/java/org/elasticsearch/ingest/core/ConfigurationUtils.java b/core/src/main/java/org/elasticsearch/ingest/core/ConfigurationUtils.java index 69adc0f9492..bd3fd8cfb6e 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/ConfigurationUtils.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/ConfigurationUtils.java @@ -19,7 +19,8 @@ package org.elasticsearch.ingest.core; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; import java.util.List; import java.util.Map; @@ -32,7 +33,7 @@ public final class ConfigurationUtils { /** * Returns and removes the specified optional property from the specified configuration map. * - * If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown. + * If the property value isn't of type string an {@link ElasticsearchParseException} is thrown. */ public static String readOptionalStringProperty(String processorType, String processorTag, Map configuration, String propertyName) { Object value = configuration.remove(propertyName); @@ -42,8 +43,8 @@ public final class ConfigurationUtils { /** * Returns and removes the specified property from the specified configuration map. * - * If the property value isn't of type string an {@link ConfigurationPropertyException} is thrown. - * If the property is missing an {@link ConfigurationPropertyException} is thrown + * If the property value isn't of type string an {@link ElasticsearchParseException} is thrown. + * If the property is missing an {@link ElasticsearchParseException} is thrown */ public static String readStringProperty(String processorType, String processorTag, Map configuration, String propertyName) { return readStringProperty(processorType, processorTag, configuration, propertyName, null); @@ -52,15 +53,15 @@ public final class ConfigurationUtils { /** * Returns and removes the specified property from the specified configuration map. * - * If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown. - * If the property is missing and no default value has been specified a {@link ConfigurationPropertyException} is thrown + * If the property value isn't of type string an {@link ElasticsearchParseException} is thrown.
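Returning to the CompoundProcessor change earlier in this hunk: the failing processor's type and tag are both exposed to the on_failure chain through the ingest metadata and removed again once the chain finishes. A stripped-down, self-contained sketch of that put/run/remove pattern (the field names come from the diff; a plain Map stands in for the document's ingest metadata):

    import java.util.HashMap;
    import java.util.Map;

    public class OnFailureMetadataSketch {
        static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message";
        static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type";
        static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag";

        static void executeOnFailure(Map<String, Object> ingestMetadata, Exception cause,
                                     String failedType, String failedTag, Runnable onFailureChain) {
            try {
                // expose the failure context to the on_failure processors while they run
                ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage());
                ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedType);
                ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedTag);
                onFailureChain.run();
            } finally {
                // never leak the transient failure fields into the indexed document
                ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD);
                ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD);
                ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD);
            }
        }

        public static void main(String[] args) {
            Map<String, Object> meta = new HashMap<>();
            executeOnFailure(meta, new RuntimeException("boom"), "convert", "my-tag",
                    () -> System.out.println("on_failure sees: " + meta));
            System.out.println("after cleanup: " + meta); // {}
        }
    }
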
+ * If the property is missing and no default value has been specified an {@link ElasticsearchParseException} is thrown */ public static String readStringProperty(String processorType, String processorTag, Map configuration, String propertyName, String defaultValue) { Object value = configuration.remove(propertyName); if (value == null && defaultValue != null) { return defaultValue; } else if (value == null) { - throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); + throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing"); } return readString(processorType, processorTag, propertyName, value); } @@ -72,13 +73,13 @@ public final class ConfigurationUtils { if (value instanceof String) { return (String) value; } - throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]"); + throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]"); } /** * Returns and removes the specified property of type list from the specified configuration map. * - * If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown. + * If the property value isn't of type list an {@link ElasticsearchParseException} is thrown. */ public static List readOptionalList(String processorType, String processorTag, Map configuration, String propertyName) { Object value = configuration.remove(propertyName); @@ -91,13 +92,13 @@ public final class ConfigurationUtils { /** * Returns and removes the specified property of type list from the specified configuration map. * - * If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown. - * If the property is missing an {@link ConfigurationPropertyException} is thrown + * If the property value isn't of type list an {@link ElasticsearchParseException} is thrown. + * If the property is missing an {@link ElasticsearchParseException} is thrown */ public static List readList(String processorType, String processorTag, Map configuration, String propertyName) { Object value = configuration.remove(propertyName); if (value == null) { - throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); + throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing"); } return readList(processorType, processorTag, propertyName, value); @@ -109,20 +110,20 @@ public final class ConfigurationUtils { List stringList = (List) value; return stringList; } else { - throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]"); + throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]"); } } /** * Returns and removes the specified property of type map from the specified configuration map. * - * If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown. - * If the property is missing an {@link ConfigurationPropertyException} is thrown + * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown.
+ * If the property is missing an {@link ElasticsearchParseException} is thrown */ public static Map readMap(String processorType, String processorTag, Map configuration, String propertyName) { Object value = configuration.remove(propertyName); if (value == null) { - throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); + throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing"); } return readMap(processorType, processorTag, propertyName, value); @@ -131,7 +132,7 @@ public final class ConfigurationUtils { /** * Returns and removes the specified property of type map from the specified configuration map. * - * If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown. + * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown. */ public static Map readOptionalMap(String processorType, String processorTag, Map configuration, String propertyName) { Object value = configuration.remove(propertyName); @@ -148,7 +149,7 @@ public final class ConfigurationUtils { Map map = (Map) value; return map; } else { - throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]"); + throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]"); } } @@ -158,8 +159,23 @@ public final class ConfigurationUtils { public static Object readObject(String processorType, String processorTag, Map configuration, String propertyName) { Object value = configuration.remove(propertyName); if (value == null) { - throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing"); + throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing"); } return value; } + + public static ElasticsearchParseException newConfigurationException(String processorType, String processorTag, String propertyName, String reason) { + ElasticsearchParseException exception = new ElasticsearchParseException("[" + propertyName + "] " + reason); + + if (processorType != null) { + exception.addHeader("processor_type", processorType); + } + if (processorTag != null) { + exception.addHeader("processor_tag", processorTag); + } + if (propertyName != null) { + exception.addHeader("property_name", propertyName); + } + return exception; + } } diff --git a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java index 5c654fbce21..1c560fa6bcc 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest.core; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; +import org.elasticsearch.ElasticsearchParseException; import java.util.ArrayList; import java.util.Arrays; @@ -27,6 +27,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; + /** * A pipeline is a list of {@link Processor} instances grouped under a unique id. 
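The newConfigurationException helper introduced above replaces the typed fields of ConfigurationPropertyException (deleted later in this patch) with headers on a generic parse exception. A self-contained sketch of that idea, using a stand-in exception class since the real ElasticsearchParseException is not reproduced here:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ConfigExceptionSketch {
        // stand-in for ElasticsearchParseException and its header support
        static class ParseException extends RuntimeException {
            final Map<String, List<String>> headers = new HashMap<>();
            ParseException(String msg) { super(msg); }
            void addHeader(String key, String value) {
                headers.computeIfAbsent(key, k -> new ArrayList<>()).add(value);
            }
        }

        static ParseException newConfigurationException(String processorType, String processorTag,
                                                         String propertyName, String reason) {
            ParseException e = new ParseException("[" + propertyName + "] " + reason);
            if (processorType != null) { e.addHeader("processor_type", processorType); }
            if (processorTag != null) { e.addHeader("processor_tag", processorTag); }
            if (propertyName != null) { e.addHeader("property_name", propertyName); }
            return e;
        }

        public static void main(String[] args) {
            ParseException e = newConfigurationException("convert", "tag-1", "type", "required property is missing");
            System.out.println(e.getMessage() + " " + e.headers);
        }
    }
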
*/ @@ -84,20 +85,20 @@ public final class Pipeline { public final static class Factory { - public Pipeline create(String id, Map config, Map processorRegistry) throws ConfigurationPropertyException { + public Pipeline create(String id, Map config, Map processorRegistry) throws Exception { String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); List>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); List processors = readProcessorConfigs(processorConfigs, processorRegistry); List>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry); if (config.isEmpty() == false) { - throw new ConfigurationPropertyException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); + throw new ElasticsearchParseException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); } CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.unmodifiableList(processors), Collections.unmodifiableList(onFailureProcessors)); return new Pipeline(id, description, compoundProcessor); } - private List readProcessorConfigs(List>> processorConfigs, Map processorRegistry) throws ConfigurationPropertyException { + private List readProcessorConfigs(List>> processorConfigs, Map processorRegistry) throws Exception { List processors = new ArrayList<>(); if (processorConfigs != null) { for (Map> processorConfigWithKey : processorConfigs) { @@ -110,28 +111,22 @@ public final class Pipeline { return processors; } - private Processor readProcessor(Map processorRegistry, String type, Map config) throws ConfigurationPropertyException { + private Processor readProcessor(Map processorRegistry, String type, Map config) throws Exception { Processor.Factory factory = processorRegistry.get(type); if (factory != null) { List>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry); Processor processor; - try { - processor = factory.create(config); - } catch (ConfigurationPropertyException e) { - throw e; - } catch (Exception e) { - throw new ConfigurationPropertyException(e.getMessage()); - } + processor = factory.create(config); if (!config.isEmpty()) { - throw new ConfigurationPropertyException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); + throw new ElasticsearchParseException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); } if (onFailureProcessors.isEmpty()) { return processor; } return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors); } - throw new ConfigurationPropertyException("No processor type exists with name [" + type + "]"); + throw new ElasticsearchParseException("No processor type exists with name [" + type + "]"); } } } diff --git a/core/src/main/java/org/elasticsearch/ingest/core/PipelineFactoryError.java b/core/src/main/java/org/elasticsearch/ingest/core/PipelineFactoryError.java deleted file mode 100644 index b987e1ee266..00000000000 --- 
a/core/src/main/java/org/elasticsearch/ingest/core/PipelineFactoryError.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest.core; - - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; - -import java.io.IOException; - -public class PipelineFactoryError implements Streamable, ToXContent { - private String reason; - private String processorType; - private String processorTag; - private String processorPropertyName; - - public PipelineFactoryError() { - - } - - public PipelineFactoryError(ConfigurationPropertyException e) { - this.reason = e.getMessage(); - this.processorType = e.getProcessorType(); - this.processorTag = e.getProcessorTag(); - this.processorPropertyName = e.getPropertyName(); - } - - public PipelineFactoryError(String reason) { - this.reason = "Constructing Pipeline failed:" + reason; - } - - public String getReason() { - return reason; - } - - public String getProcessorTag() { - return processorTag; - } - - public String getProcessorPropertyName() { - return processorPropertyName; - } - - public String getProcessorType() { - return processorType; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - reason = in.readString(); - processorType = in.readOptionalString(); - processorTag = in.readOptionalString(); - processorPropertyName = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(reason); - out.writeOptionalString(processorType); - out.writeOptionalString(processorTag); - out.writeOptionalString(processorPropertyName); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("error"); - builder.field("type", processorType); - builder.field("tag", processorTag); - builder.field("reason", reason); - builder.field("property_name", processorPropertyName); - builder.endObject(); - return builder; - } -} diff --git a/core/src/main/java/org/elasticsearch/ingest/core/PipelineFactoryResult.java b/core/src/main/java/org/elasticsearch/ingest/core/PipelineFactoryResult.java deleted file mode 100644 index ab284981b33..00000000000 --- a/core/src/main/java/org/elasticsearch/ingest/core/PipelineFactoryResult.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest.core; - -public class PipelineFactoryResult { - private final Pipeline pipeline; - private final PipelineFactoryError error; - - public PipelineFactoryResult(Pipeline pipeline) { - this.pipeline = pipeline; - this.error = null; - } - - public PipelineFactoryResult(PipelineFactoryError error) { - this.error = error; - this.pipeline = null; - } - - public Pipeline getPipeline() { - return pipeline; - } - - public PipelineFactoryError getError() { - return error; - } -} diff --git a/core/src/main/java/org/elasticsearch/ingest/core/Processor.java b/core/src/main/java/org/elasticsearch/ingest/core/Processor.java index 28049983692..8cdff8714c4 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/Processor.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/Processor.java @@ -17,11 +17,8 @@ * under the License. */ - package org.elasticsearch.ingest.core; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; - import java.util.Map; /** diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/ConfigurationPropertyException.java b/core/src/main/java/org/elasticsearch/ingest/processor/ConfigurationPropertyException.java deleted file mode 100644 index dbc35c9334f..00000000000 --- a/core/src/main/java/org/elasticsearch/ingest/processor/ConfigurationPropertyException.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest.processor; - -/** - * Exception class thrown by processor factories. 
- */ -public class ConfigurationPropertyException extends RuntimeException { - private String processorType; - private String processorTag; - private String propertyName; - - public ConfigurationPropertyException(String processorType, String processorTag, String propertyName, String message) { - super("[" + propertyName + "] " + message); - this.processorTag = processorTag; - this.processorType = processorType; - this.propertyName = propertyName; - } - - public ConfigurationPropertyException(String errorMessage) { - super(errorMessage); - } - - public String getPropertyName() { - return propertyName; - } - - public String getProcessorType() { - return processorType; - } - - public String getProcessorTag() { - return processorTag; - } -} - diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java b/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java index 213e3ec2c78..7cc85909a32 100644 --- a/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java +++ b/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessor; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.IngestDocument; @@ -29,6 +30,8 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException; + /** * Processor that converts fields content to a different type. Supported types are: integer, float, boolean and string. * Throws exception if the field is not there or the conversion fails. @@ -80,11 +83,11 @@ public class ConvertProcessor extends AbstractProcessor { public abstract Object convert(Object value); - public static Type fromString(String type) { + public static Type fromString(String processorTag, String propertyName, String type) { try { return Type.valueOf(type.toUpperCase(Locale.ROOT)); } catch(IllegalArgumentException e) { - throw new IllegalArgumentException("type [" + type + "] not supported, cannot convert field.", e); + throw newConfigurationException(TYPE, processorTag, propertyName, "type [" + type + "] not supported, cannot convert field."); } } } @@ -138,7 +141,8 @@ public class ConvertProcessor extends AbstractProcessor { @Override public ConvertProcessor doCreate(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); - Type convertType = Type.fromString(ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type")); + String typeProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type"); + Type convertType = Type.fromString(processorTag, "type", typeProperty); return new ConvertProcessor(processorTag, field, convertType); } } diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/DeDotProcessor.java b/core/src/main/java/org/elasticsearch/ingest/processor/DeDotProcessor.java deleted file mode 100644 index 62063a49fd0..00000000000 --- a/core/src/main/java/org/elasticsearch/ingest/processor/DeDotProcessor.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest.processor; - -import org.elasticsearch.ingest.core.AbstractProcessor; -import org.elasticsearch.ingest.core.AbstractProcessorFactory; -import org.elasticsearch.ingest.core.ConfigurationUtils; -import org.elasticsearch.ingest.core.IngestDocument; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -/** - * Processor that replaces dots in document field names with a - * specified separator. - */ -public class DeDotProcessor extends AbstractProcessor { - - public static final String TYPE = "dedot"; - static final String DEFAULT_SEPARATOR = "_"; - - private final String separator; - - DeDotProcessor(String tag, String separator) { - super(tag); - this.separator = separator; - } - - public String getSeparator() { - return separator; - } - - @Override - public void execute(IngestDocument document) { - deDot(document.getSourceAndMetadata()); - } - - @Override - public String getType() { - return TYPE; - } - - /** - * Recursively iterates through Maps and Lists in search of map entries with - * keys containing dots. The dots in these fields are replaced with {@link #separator}. - * - * @param obj The current object in context to be checked for dots in its fields. 
- */ - private void deDot(Object obj) { - if (obj instanceof Map) { - @SuppressWarnings("unchecked") - Map doc = (Map) obj; - Iterator> it = doc.entrySet().iterator(); - Map deDottedFields = new HashMap<>(); - while (it.hasNext()) { - Map.Entry entry = it.next(); - deDot(entry.getValue()); - String fieldName = entry.getKey(); - if (fieldName.contains(".")) { - String deDottedFieldName = fieldName.replaceAll("\\.", separator); - deDottedFields.put(deDottedFieldName, entry.getValue()); - it.remove(); - } - } - doc.putAll(deDottedFields); - } else if (obj instanceof List) { - @SuppressWarnings("unchecked") - List list = (List) obj; - for (Object value : list) { - deDot(value); - } - } - } - - public static class Factory extends AbstractProcessorFactory { - - @Override - public DeDotProcessor doCreate(String processorTag, Map config) throws Exception { - String separator = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "separator"); - if (separator == null) { - separator = DEFAULT_SEPARATOR; - } - return new DeDotProcessor(processorTag, separator); - } - } -} - diff --git a/core/src/main/java/org/elasticsearch/node/NodeModule.java b/core/src/main/java/org/elasticsearch/node/NodeModule.java index 442dc727007..365e260ebf0 100644 --- a/core/src/main/java/org/elasticsearch/node/NodeModule.java +++ b/core/src/main/java/org/elasticsearch/node/NodeModule.java @@ -28,7 +28,6 @@ import org.elasticsearch.ingest.core.TemplateService; import org.elasticsearch.ingest.processor.AppendProcessor; import org.elasticsearch.ingest.processor.ConvertProcessor; import org.elasticsearch.ingest.processor.DateProcessor; -import org.elasticsearch.ingest.processor.DeDotProcessor; import org.elasticsearch.ingest.processor.FailProcessor; import org.elasticsearch.ingest.processor.GsubProcessor; import org.elasticsearch.ingest.processor.JoinProcessor; @@ -75,7 +74,6 @@ public class NodeModule extends AbstractModule { registerProcessor(ConvertProcessor.TYPE, (templateService) -> new ConvertProcessor.Factory()); registerProcessor(GsubProcessor.TYPE, (templateService) -> new GsubProcessor.Factory()); registerProcessor(FailProcessor.TYPE, FailProcessor.Factory::new); - registerProcessor(DeDotProcessor.TYPE, (templateService) -> new DeDotProcessor.Factory()); } @Override diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index c5dd64a67bb..34b6d07e419 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -247,8 +247,8 @@ public class InternalSettingsPreparer { } if (secret) { - return new String(terminal.readSecret("Enter value for [%s]: ", key)); + return new String(terminal.readSecret("Enter value for [" + key + "]: ", key)); } - return terminal.readText("Enter value for [%s]: ", key); + return terminal.readText("Enter value for [" + key + "]: ", key); } } diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java new file mode 100644 index 00000000000..f9300d87dc0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -0,0 +1,398 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
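The InternalSettingsPreparer hunk above stops handing "%s" format strings to the terminal and concatenates the key into the prompt instead, so the message no longer depends on printf-style expansion by the terminal implementation. A rough equivalent using java.io.Console as a stand-in for elasticsearch's Terminal; the setting key is made up for illustration:

import java.io.Console;

public class PromptDemo {
    public static void main(String[] args) {
        Console console = System.console();
        if (console == null) {
            return; // not attached to an interactive terminal
        }
        String key = "cloud.aws.access_key"; // hypothetical setting key
        // concatenate the key into the prompt rather than relying on "%s" expansion
        char[] secret = console.readPassword("Enter value for [" + key + "]: ");
        console.printf("read %d characters%n", secret.length);
    }
}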
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.bootstrap.JarHell; +import org.elasticsearch.common.cli.CliTool; +import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.URL; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; + +import static java.util.Collections.unmodifiableSet; +import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; +import static org.elasticsearch.common.util.set.Sets.newHashSet; + +/** + * A command for the plugin cli to install a plugin into elasticsearch. + * + * The install command takes a plugin id, which may be any of the following: + *
<ul>
+ * <li>An official elasticsearch plugin name</li>
+ * <li>Maven coordinates to a plugin zip</li>
+ * <li>A URL to a plugin zip</li>
+ * </ul>
+ *
+ * Plugins are packaged as zip files. Each packaged plugin must contain a
+ * plugin properties file. See {@link PluginInfo}.
+ * <p>
+ * The installation process first extracts the plugin files into a temporary
+ * directory in order to verify the plugin satisfies the following requirements:
+ * <ul>
+ * <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
+ * <li>The plugin is not a module already provided with elasticsearch</li>
+ * <li>If the plugin contains extra security permissions, the policy file is validated</li>
+ * </ul>
+ * <p>
+ * A plugin may also contain an optional {@code bin} directory which contains scripts. The
+ * scripts will be installed into a subdirectory of the elasticsearch bin directory, using
+ * the name of the plugin, and the scripts will be marked executable.
+ * <p>
+ * A plugin may also contain an optional {@code config} directory which contains configuration + * files specific to the plugin. The config files be installed into a subdirectory of the + * elasticsearch config directory, using the name of the plugin. If any files to be installed + * already exist, they will be skipped. + */ +class InstallPluginCommand extends CliTool.Command { + + private static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging"; + + // TODO: make this a resource file generated by gradle + static final Set MODULES = unmodifiableSet(newHashSet( + "lang-expression", + "lang-groovy")); + + // TODO: make this a resource file generated by gradle + static final Set OFFICIAL_PLUGINS = unmodifiableSet(newHashSet( + "analysis-icu", + "analysis-kuromoji", + "analysis-phonetic", + "analysis-smartcn", + "analysis-stempel", + "delete-by-query", + "discovery-azure", + "discovery-ec2", + "discovery-gce", + "lang-javascript", + "lang-painless", + "lang-python", + "mapper-attachments", + "mapper-murmur3", + "mapper-size", + "repository-azure", + "repository-hdfs", + "repository-s3", + "store-smb")); + + private final String pluginId; + private final boolean batch; + + InstallPluginCommand(Terminal terminal, String pluginId, boolean batch) { + super(terminal); + this.pluginId = pluginId; + this.batch = batch; + } + + @Override + public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + + // TODO: remove this leniency!! is it needed anymore? + if (Files.exists(env.pluginsFile()) == false) { + terminal.println("Plugins directory [" + env.pluginsFile() + "] does not exist. Creating..."); + Files.createDirectory(env.pluginsFile()); + } + + Path pluginZip = download(pluginId, env.tmpFile()); + Path extractedZip = unzip(pluginZip, env.pluginsFile()); + install(extractedZip, env); + + return CliTool.ExitStatus.OK; + } + + /** Downloads the plugin and returns the file it was downloaded to. */ + private Path download(String pluginId, Path tmpDir) throws Exception { + if (OFFICIAL_PLUGINS.contains(pluginId)) { + final String version = Version.CURRENT.toString(); + final String url; + if (System.getProperty(PROPERTY_SUPPORT_STAGING_URLS, "false").equals("true")) { + url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%1$s-%2$s/org/elasticsearch/plugin/%3$s/%1$s/%3$s-%1$s.zip", + version, Build.CURRENT.shortHash(), pluginId); + } else { + url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%1$s/%2$s/%1$s-%2$s.zip", + pluginId, version); + } + terminal.println("-> Downloading " + pluginId + " from elastic"); + return downloadZipAndChecksum(url, tmpDir); + } + + // now try as maven coordinates, a valid URL would only have a colon and slash + String[] coordinates = pluginId.split(":"); + if (coordinates.length == 3 && pluginId.contains("/") == false) { + String mavenUrl = String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%1$s/%2$s/%3$s/%2$s-%3$s.zip", + coordinates[0].replace(".", "/") /* groupId */, coordinates[1] /* artifactId */, coordinates[2] /* version */); + terminal.println("-> Downloading " + pluginId + " from maven central"); + return downloadZipAndChecksum(mavenUrl, tmpDir); + } + + // fall back to plain old URL + terminal.println("-> Downloading " + URLDecoder.decode(pluginId, "UTF-8")); + return downloadZip(pluginId, tmpDir); + } + + /** Downloads a zip from the url, into a temp file under the given temp dir. 
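The download() method above resolves a plugin id three ways: an official plugin name, maven coordinates, or a plain URL. The coordinate case reduces to a small, easily testable mapping; a sketch with made-up coordinates, reusing the same format string as the code above:

import java.util.Locale;

class MavenCoordinates {
    // "org.example:my-plugin:1.0.0" ->
    // https://repo1.maven.org/maven2/org/example/my-plugin/1.0.0/my-plugin-1.0.0.zip
    static String toZipUrl(String pluginId) {
        String[] c = pluginId.split(":");
        if (c.length != 3 || pluginId.contains("/")) {
            throw new IllegalArgumentException("not maven coordinates: " + pluginId);
        }
        return String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%1$s/%2$s/%3$s/%2$s-%3$s.zip",
                c[0].replace(".", "/") /* groupId */, c[1] /* artifactId */, c[2] /* version */);
    }
}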
*/ + private Path downloadZip(String urlString, Path tmpDir) throws IOException { + URL url = new URL(urlString); + Path zip = Files.createTempFile(tmpDir, null, ".zip"); + try (InputStream in = url.openStream()) { + // must overwrite since creating the temp file above actually created the file + Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING); + } + return zip; + } + + /** Downloads a zip from the url, as well as a SHA1 checksum, and checks the checksum. */ + private Path downloadZipAndChecksum(String urlString, Path tmpDir) throws Exception { + Path zip = downloadZip(urlString, tmpDir); + + URL checksumUrl = new URL(urlString + ".sha1"); + final String expectedChecksum; + try (InputStream in = checksumUrl.openStream()) { + BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); + expectedChecksum = checksumReader.readLine(); + if (checksumReader.readLine() != null) { + throw new UserError(CliTool.ExitStatus.IO_ERROR, "Invalid checksum file at " + checksumUrl); + } + } + + byte[] zipbytes = Files.readAllBytes(zip); + String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes)); + if (expectedChecksum.equals(gotChecksum) == false) { + throw new UserError(CliTool.ExitStatus.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); + } + + return zip; + } + + private Path unzip(Path zip, Path pluginsDir) throws IOException { + // unzip plugin to a staging temp dir + Path target = Files.createTempDirectory(pluginsDir, ".installing-"); + Files.createDirectories(target); + + // TODO: we should wrap this in a try/catch and try deleting the target dir on failure? + try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) { + ZipEntry entry; + byte[] buffer = new byte[8192]; + while ((entry = zipInput.getNextEntry()) != null) { + Path targetFile = target.resolve(entry.getName()); + // TODO: handle name being an absolute path + + // be on the safe side: do not rely on that directories are always extracted + // before their children (although this makes sense, but is it guaranteed?) + Files.createDirectories(targetFile.getParent()); + if (entry.isDirectory() == false) { + try (OutputStream out = Files.newOutputStream(targetFile)) { + int len; + while((len = zipInput.read(buffer)) >= 0) { + out.write(buffer, 0, len); + } + } + } + zipInput.closeEntry(); + } + } + return target; + } + + /** Load information about the plugin, and verify it can be installed with no errors. */ + private PluginInfo verify(Path pluginRoot, Environment env) throws Exception { + // read and validate the plugin descriptor + PluginInfo info = PluginInfo.readFromProperties(pluginRoot); + terminal.println(VERBOSE, info.toString()); + + // don't let luser install plugin as a module... 
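The downloadZipAndChecksum() method above fetches a sibling .sha1 file and compares its contents against a digest of the downloaded bytes. A self-contained sketch of that comparison, using the JDK's MessageDigest directly where the patch uses elasticsearch's MessageDigests helper:

import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

class Sha1Check {
    static void verifySha1(Path zip, String expectedChecksum) throws Exception {
        byte[] zipBytes = Files.readAllBytes(zip);
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(zipBytes);
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b)); // two lower-case hex chars per byte
        }
        if (expectedChecksum.equals(hex.toString()) == false) {
            throw new IllegalStateException(
                    "SHA1 mismatch, expected " + expectedChecksum + " but got " + hex);
        }
    }
}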
+ // they might be unavoidably in maven central and are packaged up the same way) + if (MODULES.contains(info.getName())) { + throw new UserError(CliTool.ExitStatus.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); + } + + // check for jar hell before any copying + jarHellCheck(pluginRoot, env.pluginsFile(), info.isIsolated()); + + // read optional security policy (extra permissions) + // if it exists, confirm or warn the user + Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY); + if (Files.exists(policy)) { + PluginSecurity.readPolicy(policy, terminal, env, batch); + } + + return info; + } + + /** check a candidate plugin for jar hell before installing it */ + private void jarHellCheck(Path candidate, Path pluginsDir, boolean isolated) throws Exception { + // create list of current jars in classpath + final List jars = new ArrayList<>(); + jars.addAll(Arrays.asList(JarHell.parseClassPath())); + + // read existing bundles. this does some checks on the installation too. + List bundles = PluginsService.getPluginBundles(pluginsDir); + + // if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins + // thats always the first bundle + if (isolated == false) { + jars.addAll(bundles.get(0).urls); + } + + // add plugin jars to the list + Path pluginJars[] = FileSystemUtils.files(candidate, "*.jar"); + for (Path jar : pluginJars) { + jars.add(jar.toUri().toURL()); + } + // TODO: no jars should be an error + // TODO: verify the classname exists in one of the jars! + + // check combined (current classpath + new jars to-be-added) + JarHell.checkJarHell(jars.toArray(new URL[jars.size()])); + } + + /** + * Installs the plugin from {@code tmpRoot} into the plugins dir. + * If the plugin has a bin dir and/or a config dir, those are copied. + */ + private void install(Path tmpRoot, Environment env) throws Exception { + List deleteOnFailure = new ArrayList<>(); + deleteOnFailure.add(tmpRoot); + + try { + PluginInfo info = verify(tmpRoot, env); + + final Path destination = env.pluginsFile().resolve(info.getName()); + if (Files.exists(destination)) { + throw new UserError(CliTool.ExitStatus.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command"); + } + + Path tmpBinDir = tmpRoot.resolve("bin"); + if (Files.exists(tmpBinDir)) { + Path destBinDir = env.binFile().resolve(info.getName()); + deleteOnFailure.add(destBinDir); + installBin(info, tmpBinDir, destBinDir); + } + + Path tmpConfigDir = tmpRoot.resolve("config"); + if (Files.exists(tmpConfigDir)) { + // some files may already exist, and we don't remove plugin config files on plugin removal, + // so any installed config files are left on failure too + installConfig(info, tmpConfigDir, env.configFile().resolve(info.getName())); + } + + Files.move(tmpRoot, destination, StandardCopyOption.ATOMIC_MOVE); + terminal.println("-> Installed " + info.getName()); + + } catch (Exception installProblem) { + try { + IOUtils.rm(deleteOnFailure.toArray(new Path[0])); + } catch (IOException exceptionWhileRemovingFiles) { + installProblem.addSuppressed(exceptionWhileRemovingFiles); + } + throw installProblem; + } + } + + /** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. 
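The install() method above builds a delete-on-failure list so a partial install is rolled back while the original exception is preserved, with cleanup failures attached via addSuppressed. The idiom in isolation; Files.deleteIfExists here stands in for lucene's IOUtils.rm, which the patch uses to remove whole directory trees:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

class RollbackDemo {
    static void installWithRollback(Path tmpRoot, Path destination) throws Exception {
        List<Path> deleteOnFailure = new ArrayList<>();
        deleteOnFailure.add(tmpRoot);
        try {
            // ... stage bin/ and config/ here, adding each created path to deleteOnFailure ...
            Files.move(tmpRoot, destination); // the patch additionally passes ATOMIC_MOVE
        } catch (Exception installProblem) {
            for (Path path : deleteOnFailure) {
                try {
                    Files.deleteIfExists(path); // sketch only; a real rollback removes recursively
                } catch (IOException cleanupProblem) {
                    installProblem.addSuppressed(cleanupProblem);
                }
            }
            throw installProblem;
        }
    }
}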
*/ + private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception { + if (Files.isDirectory(tmpBinDir) == false) { + throw new UserError(CliTool.ExitStatus.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); + } + Files.createDirectory(destBinDir); + + // setup file attributes for the installed files to those of the parent dir + Set perms = new HashSet<>(); + PosixFileAttributeView binAttrs = Files.getFileAttributeView(destBinDir.getParent(), PosixFileAttributeView.class); + if (binAttrs != null) { + perms = new HashSet<>(binAttrs.readAttributes().permissions()); + // setting execute bits, since this just means "the file is executable", and actual execution requires read + perms.add(PosixFilePermission.OWNER_EXECUTE); + perms.add(PosixFilePermission.GROUP_EXECUTE); + perms.add(PosixFilePermission.OTHERS_EXECUTE); + } + + try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { + for (Path srcFile : stream) { + if (Files.isDirectory(srcFile)) { + throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); + } + + Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); + Files.copy(srcFile, destFile); + + if (perms.isEmpty() == false) { + PosixFileAttributeView view = Files.getFileAttributeView(destFile, PosixFileAttributeView.class); + view.setPermissions(perms); + } + } + } + IOUtils.rm(tmpBinDir); // clean up what we just copied + } + + /** + * Copies the files from {@code tmpConfigDir} into {@code destConfigDir}. + * Any files existing in both the source and destination will be skipped. + */ + private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception { + if (Files.isDirectory(tmpConfigDir) == false) { + throw new UserError(CliTool.ExitStatus.IO_ERROR, "config in plugin " + info.getName() + " is not a directory"); + } + + // create the plugin's config dir "if necessary" + Files.createDirectories(destConfigDir); + + try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { + for (Path srcFile : stream) { + if (Files.isDirectory(srcFile)) { + throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); + } + + Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); + if (Files.exists(destFile) == false) { + Files.copy(srcFile, destFile); + } + } + } + IOUtils.rm(tmpConfigDir); // clean up what we just copied + } +} diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java new file mode 100644 index 00000000000..6abed4e6bc2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import org.elasticsearch.common.cli.CliTool; +import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; + +/** + * A command for the plugin cli to list plugins installed in elasticsearch. + */ +class ListPluginsCommand extends CliTool.Command { + + ListPluginsCommand(Terminal terminal) { + super(terminal); + } + + @Override + public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + if (Files.exists(env.pluginsFile()) == false) { + throw new IOException("Plugins directory missing: " + env.pluginsFile()); + } + + terminal.println(Terminal.Verbosity.VERBOSE, "Plugins directory: " + env.pluginsFile()); + try (DirectoryStream stream = Files.newDirectoryStream(env.pluginsFile())) { + for (Path plugin : stream) { + terminal.println(plugin.getFileName().toString()); + } + } + + return CliTool.ExitStatus.OK; + } +} diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java new file mode 100644 index 00000000000..30a36501a61 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import org.apache.commons.cli.CommandLine; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.cli.CliTool; +import org.elasticsearch.common.cli.CliToolConfig; +import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.common.logging.log4j.LogConfigurator; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.internal.InternalSettingsPreparer; + +import java.util.Locale; + +import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; +import static org.elasticsearch.common.cli.CliToolConfig.Builder.option; + +/** + * A cli tool for adding, removing and listing plugins for elasticsearch. 
+ */ +public class PluginCli extends CliTool { + + // commands + private static final String LIST_CMD_NAME = "list"; + private static final String INSTALL_CMD_NAME = "install"; + private static final String REMOVE_CMD_NAME = "remove"; + + // usage config + private static final CliToolConfig.Cmd LIST_CMD = cmd(LIST_CMD_NAME, ListPluginsCommand.class).build(); + private static final CliToolConfig.Cmd INSTALL_CMD = cmd(INSTALL_CMD_NAME, InstallPluginCommand.class) + .options(option("b", "batch").required(false)) + .build(); + private static final CliToolConfig.Cmd REMOVE_CMD = cmd(REMOVE_CMD_NAME, RemovePluginCommand.class).build(); + + static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginCli.class) + .cmds(LIST_CMD, INSTALL_CMD, REMOVE_CMD) + .build(); + + public static void main(String[] args) throws Exception { + // initialize default for es.logger.level because we will not read the logging.yml + String loggerLevel = System.getProperty("es.logger.level", "INFO"); + // Set the appender for all potential log files to terminal so that other components that use the logger print out the + // same terminal. + // The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is + // executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch + // is run as service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs. + // Therefore we print to Terminal. + Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder() + .put("appender.terminal.type", "terminal") + .put("rootLogger", "${es.logger.level}, terminal") + .put("es.logger.level", loggerLevel) + .build(), Terminal.DEFAULT); + // configure but do not read the logging conf file + LogConfigurator.configure(env.settings(), false); + int status = new PluginCli(Terminal.DEFAULT).execute(args).status(); + exit(status); + } + + @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") + private static void exit(int status) { + System.exit(status); + } + + PluginCli(Terminal terminal) { + super(CONFIG, terminal); + } + + @Override + protected Command parse(String cmdName, CommandLine cli) throws Exception { + switch (cmdName.toLowerCase(Locale.ROOT)) { + case LIST_CMD_NAME: + return new ListPluginsCommand(terminal); + case INSTALL_CMD_NAME: + return parseInstallPluginCommand(cli); + case REMOVE_CMD_NAME: + return parseRemovePluginCommand(cli); + default: + assert false : "can't get here as cmd name is validated before this method is called"; + return exitCmd(ExitStatus.USAGE); + } + } + + private Command parseInstallPluginCommand(CommandLine cli) { + String[] args = cli.getArgs(); + if (args.length != 1) { + return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin id argument"); + } + + boolean batch = System.console() == null; + if (cli.hasOption("b")) { + batch = true; + } + + return new InstallPluginCommand(terminal, args[0], batch); + } + + private Command parseRemovePluginCommand(CommandLine cli) { + String[] args = cli.getArgs(); + if (args.length != 1) { + return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin name argument"); + } + + return new RemovePluginCommand(terminal, args[0]); + } +} diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 76af7833f06..73464d054dd 100644 --- 
a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -82,7 +82,6 @@ public class PluginInfo implements Streamable, ToXContent { if (name == null || name.isEmpty()) { throw new IllegalArgumentException("Property [name] is missing in [" + descriptor + "]"); } - PluginManager.checkForForbiddenName(name); String description = props.getProperty("description"); if (description == null) { throw new IllegalArgumentException("Property [description] is missing for plugin [" + name + "]"); diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java deleted file mode 100644 index a107c957bd4..00000000000 --- a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ /dev/null @@ -1,686 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugins; - -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.Build; -import org.elasticsearch.ElasticsearchCorruptionException; -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; -import org.elasticsearch.bootstrap.JarHell; -import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.http.client.HttpDownloadHelper; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.PluginsService.Bundle; - -import java.io.IOException; -import java.io.OutputStream; -import java.net.MalformedURLException; -import java.net.URL; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.GroupPrincipal; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFileAttributes; -import java.nio.file.attribute.PosixFilePermission; -import java.nio.file.attribute.UserPrincipal; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Set; -import java.util.stream.StreamSupport; -import java.util.zip.ZipEntry; -import java.util.zip.ZipInputStream; - -import static java.util.Collections.unmodifiableSet; -import static org.elasticsearch.common.Strings.hasLength; -import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; -import static 
org.elasticsearch.common.io.FileSystemUtils.moveFilesWithoutOverwriting; -import static org.elasticsearch.common.util.set.Sets.newHashSet; - -/** - * - */ -public class PluginManager { - - public static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging"; - - public enum OutputMode { - DEFAULT, SILENT, VERBOSE - } - - private static final Set BLACKLIST = unmodifiableSet(newHashSet( - "elasticsearch", - "elasticsearch.bat", - "elasticsearch.in.sh", - "plugin", - "plugin.bat", - "service.bat")); - - static final Set MODULES = unmodifiableSet(newHashSet( - "lang-expression", - "lang-groovy")); - - static final Set OFFICIAL_PLUGINS = unmodifiableSet(newHashSet( - "analysis-icu", - "analysis-kuromoji", - "analysis-phonetic", - "analysis-smartcn", - "analysis-stempel", - "delete-by-query", - "discovery-azure", - "discovery-ec2", - "discovery-gce", - "discovery-multicast", - "ingest-geoip", - "lang-javascript", - "lang-painless", - "lang-python", - "mapper-attachments", - "mapper-murmur3", - "mapper-size", - "repository-azure", - "repository-hdfs", - "repository-s3", - "store-smb")); - - private final Environment environment; - private URL url; - private OutputMode outputMode; - private TimeValue timeout; - - public PluginManager(Environment environment, URL url, OutputMode outputMode, TimeValue timeout) { - this.environment = environment; - this.url = url; - this.outputMode = outputMode; - this.timeout = timeout; - } - - public void downloadAndExtract(String name, Terminal terminal, boolean batch) throws IOException { - if (name == null && url == null) { - throw new IllegalArgumentException("plugin name or url must be supplied with install."); - } - - if (!Files.exists(environment.pluginsFile())) { - terminal.println("Plugins directory [%s] does not exist. 
Creating...", environment.pluginsFile()); - Files.createDirectory(environment.pluginsFile()); - } - - if (!Environment.isWritable(environment.pluginsFile())) { - throw new IOException("plugin directory " + environment.pluginsFile() + " is read only"); - } - - PluginHandle pluginHandle; - if (name != null) { - pluginHandle = PluginHandle.parse(name); - checkForForbiddenName(pluginHandle.name); - } else { - // if we have no name but url, use temporary name that will be overwritten later - pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null); - } - - Path pluginFile = download(pluginHandle, terminal); - extract(pluginHandle, terminal, pluginFile, batch); - } - - private Path download(PluginHandle pluginHandle, Terminal terminal) throws IOException { - Path pluginFile = pluginHandle.newDistroFile(environment); - - HttpDownloadHelper downloadHelper = new HttpDownloadHelper(); - boolean downloaded = false; - boolean verified = false; - HttpDownloadHelper.DownloadProgress progress; - if (outputMode == OutputMode.SILENT) { - progress = new HttpDownloadHelper.NullProgress(); - } else { - progress = new HttpDownloadHelper.VerboseProgress(terminal.writer()); - } - - // first, try directly from the URL provided - if (url != null) { - URL pluginUrl = url; - boolean isSecureProcotol = "https".equalsIgnoreCase(pluginUrl.getProtocol()); - boolean isAuthInfoSet = !Strings.isNullOrEmpty(pluginUrl.getUserInfo()); - if (isAuthInfoSet && !isSecureProcotol) { - throw new IOException("Basic auth is only supported for HTTPS!"); - } - - terminal.println("Trying %s ...", pluginUrl.toExternalForm()); - try { - downloadHelper.download(pluginUrl, pluginFile, progress, this.timeout); - downloaded = true; - terminal.println("Verifying %s checksums if available ...", pluginUrl.toExternalForm()); - Tuple sha1Info = pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "sha1"); - verified = downloadHelper.downloadAndVerifyChecksum(sha1Info.v1(), pluginFile, - sha1Info.v2(), progress, this.timeout, HttpDownloadHelper.SHA1_CHECKSUM); - Tuple md5Info = pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "md5"); - verified = verified || downloadHelper.downloadAndVerifyChecksum(md5Info.v1(), pluginFile, - md5Info.v2(), progress, this.timeout, HttpDownloadHelper.MD5_CHECKSUM); - } catch (ElasticsearchTimeoutException | ElasticsearchCorruptionException e) { - throw e; - } catch (Exception e) { - // ignore - terminal.println("Failed: %s", ExceptionsHelper.detailedMessage(e)); - } - } else { - if (PluginHandle.isOfficialPlugin(pluginHandle.name, pluginHandle.user, pluginHandle.version)) { - checkForOfficialPlugins(pluginHandle.name); - } - } - - if (!downloaded && url == null) { - // We try all possible locations - for (URL url : pluginHandle.urls()) { - terminal.println("Trying %s ...", url.toExternalForm()); - try { - downloadHelper.download(url, pluginFile, progress, this.timeout); - downloaded = true; - terminal.println("Verifying %s checksums if available ...", url.toExternalForm()); - Tuple sha1Info = pluginHandle.newChecksumUrlAndFile(environment, url, "sha1"); - verified = downloadHelper.downloadAndVerifyChecksum(sha1Info.v1(), pluginFile, - sha1Info.v2(), progress, this.timeout, HttpDownloadHelper.SHA1_CHECKSUM); - Tuple md5Info = pluginHandle.newChecksumUrlAndFile(environment, url, "md5"); - verified = verified || downloadHelper.downloadAndVerifyChecksum(md5Info.v1(), pluginFile, - md5Info.v2(), progress, this.timeout, HttpDownloadHelper.MD5_CHECKSUM); - break; - } catch 
(ElasticsearchTimeoutException | ElasticsearchCorruptionException e) { - throw e; - } catch (Exception e) { - terminal.println(VERBOSE, "Failed: %s", ExceptionsHelper.detailedMessage(e)); - } - } - } - - if (!downloaded) { - // try to cleanup what we downloaded - IOUtils.deleteFilesIgnoringExceptions(pluginFile); - throw new IOException("failed to download out of all possible locations..., use --verbose to get detailed information"); - } - - if (verified == false) { - terminal.println("NOTE: Unable to verify checksum for downloaded plugin (unable to find .sha1 or .md5 file to verify)"); - } - return pluginFile; - } - - private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile, boolean batch) throws IOException { - // unzip plugin to a staging temp dir, named for the plugin - Path tmp = Files.createTempDirectory(environment.tmpFile(), null); - Path root = tmp.resolve(pluginHandle.name); - unzipPlugin(pluginFile, root); - - // find the actual root (in case its unzipped with extra directory wrapping) - root = findPluginRoot(root); - - // read and validate the plugin descriptor - PluginInfo info = PluginInfo.readFromProperties(root); - terminal.println(VERBOSE, "%s", info); - - // don't let luser install plugin as a module... - // they might be unavoidably in maven central and are packaged up the same way) - if (MODULES.contains(info.getName())) { - throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); - } - - // update name in handle based on 'name' property found in descriptor file - pluginHandle = new PluginHandle(info.getName(), pluginHandle.version, pluginHandle.user); - final Path extractLocation = pluginHandle.extractedDir(environment); - if (Files.exists(extractLocation)) { - throw new IOException("plugin directory " + extractLocation.toAbsolutePath() + " already exists. 
To update the plugin, uninstall it first using 'remove " + pluginHandle.name + "' command"); - } - - // check for jar hell before any copying - jarHellCheck(root, info.isIsolated()); - - // read optional security policy (extra permissions) - // if it exists, confirm or warn the user - Path policy = root.resolve(PluginInfo.ES_PLUGIN_POLICY); - if (Files.exists(policy)) { - PluginSecurity.readPolicy(policy, terminal, environment, batch); - } - - // install plugin - FileSystemUtils.copyDirectoryRecursively(root, extractLocation); - terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath()); - - // cleanup - tryToDeletePath(terminal, tmp, pluginFile); - - // take care of bin/ by moving and applying permissions if needed - Path sourcePluginBinDirectory = extractLocation.resolve("bin"); - Path destPluginBinDirectory = pluginHandle.binDir(environment); - boolean needToCopyBinDirectory = Files.exists(sourcePluginBinDirectory); - if (needToCopyBinDirectory) { - if (Files.exists(destPluginBinDirectory) && !Files.isDirectory(destPluginBinDirectory)) { - tryToDeletePath(terminal, extractLocation); - throw new IOException("plugin bin directory " + destPluginBinDirectory + " is not a directory"); - } - - try { - copyBinDirectory(sourcePluginBinDirectory, destPluginBinDirectory, pluginHandle.name, terminal); - } catch (IOException e) { - // rollback and remove potentially before installed leftovers - terminal.printError("Error copying bin directory [%s] to [%s], cleaning up, reason: %s", sourcePluginBinDirectory, destPluginBinDirectory, ExceptionsHelper.detailedMessage(e)); - tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment)); - throw e; - } - - } - - Path sourceConfigDirectory = extractLocation.resolve("config"); - Path destConfigDirectory = pluginHandle.configDir(environment); - boolean needToCopyConfigDirectory = Files.exists(sourceConfigDirectory); - if (needToCopyConfigDirectory) { - if (Files.exists(destConfigDirectory) && !Files.isDirectory(destConfigDirectory)) { - tryToDeletePath(terminal, extractLocation, destPluginBinDirectory); - throw new IOException("plugin config directory " + destConfigDirectory + " is not a directory"); - } - - try { - terminal.println(VERBOSE, "Found config, moving to %s", destConfigDirectory.toAbsolutePath()); - moveFilesWithoutOverwriting(sourceConfigDirectory, destConfigDirectory, ".new"); - - if (Environment.getFileStore(destConfigDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) { - //We copy owner, group and permissions from the parent ES_CONFIG directory, assuming they were properly set depending - // on how es was installed in the first place: can be root:elasticsearch (750) if es was installed from rpm/deb packages - // or most likely elasticsearch:elasticsearch if installed from tar/zip. As for permissions we don't rely on umask. 
- PosixFileAttributes parentDirAttributes = Files.getFileAttributeView(destConfigDirectory.getParent(), PosixFileAttributeView.class).readAttributes(); - //for files though, we make sure not to copy execute permissions from the parent dir and leave them untouched - Set baseFilePermissions = new HashSet<>(); - for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) { - switch (posixFilePermission) { - case OWNER_EXECUTE: - case GROUP_EXECUTE: - case OTHERS_EXECUTE: - break; - default: - baseFilePermissions.add(posixFilePermission); - } - } - Files.walkFileTree(destConfigDirectory, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - if (attrs.isRegularFile()) { - Set newFilePermissions = new HashSet<>(baseFilePermissions); - Set currentFilePermissions = Files.getPosixFilePermissions(file); - for (PosixFilePermission posixFilePermission : currentFilePermissions) { - switch (posixFilePermission) { - case OWNER_EXECUTE: - case GROUP_EXECUTE: - case OTHERS_EXECUTE: - newFilePermissions.add(posixFilePermission); - } - } - setPosixFileAttributes(file, parentDirAttributes.owner(), parentDirAttributes.group(), newFilePermissions); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { - setPosixFileAttributes(dir, parentDirAttributes.owner(), parentDirAttributes.group(), parentDirAttributes.permissions()); - return FileVisitResult.CONTINUE; - } - }); - } else { - terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission"); - } - - terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, destConfigDirectory.toAbsolutePath()); - } catch (IOException e) { - terminal.printError("Error copying config directory [%s] to [%s], cleaning up, reason: %s", sourceConfigDirectory, destConfigDirectory, ExceptionsHelper.detailedMessage(e)); - tryToDeletePath(terminal, extractLocation, destPluginBinDirectory, destConfigDirectory); - throw e; - } - } - } - - private static void setPosixFileAttributes(Path path, UserPrincipal owner, GroupPrincipal group, Set permissions) throws IOException { - PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); - fileAttributeView.setOwner(owner); - fileAttributeView.setGroup(group); - fileAttributeView.setPermissions(permissions); - } - - static void tryToDeletePath(Terminal terminal, Path ... 
paths) { - for (Path path : paths) { - try { - IOUtils.rm(path); - } catch (IOException e) { - terminal.printError(e); - } - } - } - - private void copyBinDirectory(Path sourcePluginBinDirectory, Path destPluginBinDirectory, String pluginName, Terminal terminal) throws IOException { - boolean canCopyFromSource = Files.exists(sourcePluginBinDirectory) && Files.isReadable(sourcePluginBinDirectory) && Files.isDirectory(sourcePluginBinDirectory); - if (canCopyFromSource) { - terminal.println(VERBOSE, "Found bin, moving to %s", destPluginBinDirectory.toAbsolutePath()); - if (Files.exists(destPluginBinDirectory)) { - IOUtils.rm(destPluginBinDirectory); - } - try { - Files.createDirectories(destPluginBinDirectory.getParent()); - FileSystemUtils.move(sourcePluginBinDirectory, destPluginBinDirectory); - } catch (IOException e) { - throw new IOException("Could not move [" + sourcePluginBinDirectory + "] to [" + destPluginBinDirectory + "]", e); - } - if (Environment.getFileStore(destPluginBinDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) { - PosixFileAttributes parentDirAttributes = Files.getFileAttributeView(destPluginBinDirectory.getParent(), PosixFileAttributeView.class).readAttributes(); - //copy permissions from parent bin directory - Set filePermissions = new HashSet<>(); - for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) { - switch (posixFilePermission) { - case OWNER_EXECUTE: - case GROUP_EXECUTE: - case OTHERS_EXECUTE: - break; - default: - filePermissions.add(posixFilePermission); - } - } - // add file execute permissions to existing perms, so execution will work. - filePermissions.add(PosixFilePermission.OWNER_EXECUTE); - filePermissions.add(PosixFilePermission.GROUP_EXECUTE); - filePermissions.add(PosixFilePermission.OTHERS_EXECUTE); - Files.walkFileTree(destPluginBinDirectory, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - if (attrs.isRegularFile()) { - setPosixFileAttributes(file, parentDirAttributes.owner(), parentDirAttributes.group(), filePermissions); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { - setPosixFileAttributes(dir, parentDirAttributes.owner(), parentDirAttributes.group(), parentDirAttributes.permissions()); - return FileVisitResult.CONTINUE; - } - }); - } else { - terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission"); - } - terminal.println(VERBOSE, "Installed %s into %s", pluginName, destPluginBinDirectory.toAbsolutePath()); - } - } - - /** we check whether we need to remove the top-level folder while extracting - * sometimes (e.g. 
github) the downloaded archive contains a top-level folder which needs to be removed - */ - private Path findPluginRoot(Path dir) throws IOException { - if (Files.exists(dir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) { - return dir; - } else { - final Path[] topLevelFiles = FileSystemUtils.files(dir); - if (topLevelFiles.length == 1 && Files.isDirectory(topLevelFiles[0])) { - Path subdir = topLevelFiles[0]; - if (Files.exists(subdir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) { - return subdir; - } - } - } - throw new RuntimeException("Could not find plugin descriptor '" + PluginInfo.ES_PLUGIN_PROPERTIES + "' in plugin zip"); - } - - /** check a candidate plugin for jar hell before installing it */ - private void jarHellCheck(Path candidate, boolean isolated) throws IOException { - // create list of current jars in classpath - final List jars = new ArrayList<>(); - jars.addAll(Arrays.asList(JarHell.parseClassPath())); - - // read existing bundles. this does some checks on the installation too. - List bundles = PluginsService.getPluginBundles(environment.pluginsFile()); - - // if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins - // thats always the first bundle - if (isolated == false) { - jars.addAll(bundles.get(0).urls); - } - - // add plugin jars to the list - Path pluginJars[] = FileSystemUtils.files(candidate, "*.jar"); - for (Path jar : pluginJars) { - jars.add(jar.toUri().toURL()); - } - - // check combined (current classpath + new jars to-be-added) - try { - JarHell.checkJarHell(jars.toArray(new URL[jars.size()])); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - } - - private void unzipPlugin(Path zip, Path target) throws IOException { - Files.createDirectories(target); - - try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) { - ZipEntry entry; - byte[] buffer = new byte[8192]; - while ((entry = zipInput.getNextEntry()) != null) { - Path targetFile = target.resolve(entry.getName()); - - // be on the safe side: do not rely on that directories are always extracted - // before their children (although this makes sense, but is it guaranteed?) - Files.createDirectories(targetFile.getParent()); - if (entry.isDirectory() == false) { - try (OutputStream out = Files.newOutputStream(targetFile)) { - int len; - while((len = zipInput.read(buffer)) >= 0) { - out.write(buffer, 0, len); - } - } - } - zipInput.closeEntry(); - } - } - } - - public void removePlugin(String name, Terminal terminal) throws IOException { - if (name == null) { - throw new IllegalArgumentException("plugin name must be supplied with remove [name]."); - } - PluginHandle pluginHandle = PluginHandle.parse(name); - boolean removed = false; - - checkForForbiddenName(pluginHandle.name); - Path pluginToDelete = pluginHandle.extractedDir(environment); - if (Files.exists(pluginToDelete)) { - terminal.println(VERBOSE, "Removing: %s", pluginToDelete); - try { - IOUtils.rm(pluginToDelete); - } catch (IOException ex){ - throw new IOException("Unable to remove " + pluginHandle.name + ". Check file permissions on " + - pluginToDelete.toString(), ex); - } - removed = true; - } - Path binLocation = pluginHandle.binDir(environment); - if (Files.exists(binLocation)) { - terminal.println(VERBOSE, "Removing: %s", binLocation); - try { - IOUtils.rm(binLocation); - } catch (IOException ex){ - throw new IOException("Unable to remove " + pluginHandle.name + ". 
Check file permissions on " + - binLocation.toString(), ex); - } - removed = true; - } - - if (removed) { - terminal.println("Removed %s", name); - } else { - terminal.println("Plugin %s not found. Run \"plugin list\" to get list of installed plugins.", name); - } - } - - static void checkForForbiddenName(String name) { - if (!hasLength(name) || BLACKLIST.contains(name.toLowerCase(Locale.ROOT))) { - throw new IllegalArgumentException("Illegal plugin name: " + name); - } - } - - protected static void checkForOfficialPlugins(String name) { - // We make sure that users can use only new short naming for official plugins only - if (!OFFICIAL_PLUGINS.contains(name)) { - throw new IllegalArgumentException(name + - " is not an official plugin so you should install it using elasticsearch/" + - name + "/latest naming form."); - } - } - - public Path[] getListInstalledPlugins() throws IOException { - if (!Files.exists(environment.pluginsFile())) { - return new Path[0]; - } - - try (DirectoryStream stream = Files.newDirectoryStream(environment.pluginsFile())) { - return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]); - } - } - - public void listInstalledPlugins(Terminal terminal) throws IOException { - Path[] plugins = getListInstalledPlugins(); - terminal.println("Installed plugins in %s:", environment.pluginsFile().toAbsolutePath()); - if (plugins == null || plugins.length == 0) { - terminal.println(" - No plugin detected"); - } else { - for (Path plugin : plugins) { - terminal.println(" - " + plugin.getFileName()); - } - } - } - - /** - * Helper class to extract properly user name, repository name, version and plugin name - * from plugin name given by a user. - */ - static class PluginHandle { - - final String version; - final String user; - final String name; - - PluginHandle(String name, String version, String user) { - this.version = version; - this.user = user; - this.name = name; - } - - List urls() { - List urls = new ArrayList<>(); - if (version != null) { - // Elasticsearch new download service uses groupId org.elasticsearch.plugin from 2.0.0 - if (user == null) { - if (!Strings.isNullOrEmpty(System.getProperty(PROPERTY_SUPPORT_STAGING_URLS))) { - addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", version, Build.CURRENT.shortHash(), name, version, name, version)); - } - addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", name, version, name, version)); - } else { - // Elasticsearch old download service - addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/%1$s/%2$s/%2$s-%3$s.zip", user, name, version)); - // Maven central repository - addUrl(urls, String.format(Locale.ROOT, "https://search.maven.org/remotecontent?filepath=%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version)); - // Sonatype repository - addUrl(urls, String.format(Locale.ROOT, "https://oss.sonatype.org/service/local/repositories/releases/content/%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version)); - // Github repository - addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/%3$s.zip", user, name, version)); - } - } - if (user != null) { - // Github repository for master branch (assume site) - addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/master.zip", user, name)); - } - return urls; - } - - private static void 
addUrl(List urls, String url) { - try { - urls.add(new URL(url)); - } catch (MalformedURLException e) { - // We simply ignore malformed URL - } - } - - Path newDistroFile(Environment env) throws IOException { - return Files.createTempFile(env.tmpFile(), name, ".zip"); - } - - Tuple newChecksumUrlAndFile(Environment env, URL originalUrl, String suffix) throws IOException { - URL newUrl = new URL(originalUrl.toString() + "." + suffix); - return new Tuple<>(newUrl, Files.createTempFile(env.tmpFile(), name, ".zip." + suffix)); - } - - Path extractedDir(Environment env) { - return env.pluginsFile().resolve(name); - } - - Path binDir(Environment env) { - return env.binFile().resolve(name); - } - - Path configDir(Environment env) { - return env.configFile().resolve(name); - } - - static PluginHandle parse(String name) { - String[] elements = name.split("/"); - // We first consider the simplest form: pluginname - String repo = elements[0]; - String user = null; - String version = null; - - // We consider the form: username/pluginname - if (elements.length > 1) { - user = elements[0]; - repo = elements[1]; - - // We consider the form: username/pluginname/version - if (elements.length > 2) { - version = elements[2]; - } - } - - if (isOfficialPlugin(repo, user, version)) { - return new PluginHandle(repo, Version.CURRENT.number(), null); - } - - return new PluginHandle(repo, version, user); - } - - static boolean isOfficialPlugin(String repo, String user, String version) { - return version == null && user == null && !Strings.isNullOrEmpty(repo); - } - } - -} diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManagerCliParser.java b/core/src/main/java/org/elasticsearch/plugins/PluginManagerCliParser.java deleted file mode 100644 index a8a51db971c..00000000000 --- a/core/src/main/java/org/elasticsearch/plugins/PluginManagerCliParser.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.plugins; - -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.logging.log4j.LogConfigurator; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.plugins.PluginManager.OutputMode; - -import java.net.MalformedURLException; -import java.net.URL; -import java.net.URLDecoder; -import java.util.Locale; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.option; - -public class PluginManagerCliParser extends CliTool { - - // By default timeout is 0 which means no timeout - public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMillis(0); - - private static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginManagerCliParser.class) - .cmds(ListPlugins.CMD, Install.CMD, Remove.CMD) - .build(); - - public static void main(String[] args) { - // initialize default for es.logger.level because we will not read the logging.yml - String loggerLevel = System.getProperty("es.logger.level", "INFO"); - // Set the appender for all potential log files to terminal so that other components that use the logger print out the - // same terminal. - // The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is - // executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch - // is run as service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs. - // Therefore we print to Terminal. 
- Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder() - .put("appender.terminal.type", "terminal") - .put("rootLogger", "${es.logger.level}, terminal") - .put("es.logger.level", loggerLevel) - .build(), Terminal.DEFAULT); - // configure but do not read the logging conf file - LogConfigurator.configure(env.settings(), false); - int status = new PluginManagerCliParser().execute(args).status(); - exit(status); - } - - @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") - private static void exit(int status) { - System.exit(status); - } - - public PluginManagerCliParser() { - super(CONFIG); - } - - public PluginManagerCliParser(Terminal terminal) { - super(CONFIG, terminal); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case Install.NAME: - return Install.parse(terminal, cli); - case ListPlugins.NAME: - return ListPlugins.parse(terminal, cli); - case Remove.NAME: - return Remove.parse(terminal, cli); - default: - assert false : "can't get here as cmd name is validated before this method is called"; - return exitCmd(ExitStatus.USAGE); - } - } - - /** - * List all installed plugins - */ - static class ListPlugins extends CliTool.Command { - - private static final String NAME = "list"; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, ListPlugins.class).build(); - private final OutputMode outputMode; - - public static Command parse(Terminal terminal, CommandLine cli) { - OutputMode outputMode = OutputMode.DEFAULT; - if (cli.hasOption("s")) { - outputMode = OutputMode.SILENT; - } - if (cli.hasOption("v")) { - outputMode = OutputMode.VERBOSE; - } - - return new ListPlugins(terminal, outputMode); - } - - ListPlugins(Terminal terminal, OutputMode outputMode) { - super(terminal); - this.outputMode = outputMode; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - PluginManager pluginManager = new PluginManager(env, null, outputMode, DEFAULT_TIMEOUT); - pluginManager.listInstalledPlugins(terminal); - return ExitStatus.OK; - } - } - - /** - * Remove a plugin - */ - static class Remove extends CliTool.Command { - - private static final String NAME = "remove"; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, Remove.class).build(); - - public static Command parse(Terminal terminal, CommandLine cli) { - String[] args = cli.getArgs(); - if (args.length == 0) { - return exitCmd(ExitStatus.USAGE, terminal, "plugin name is missing (type -h for help)"); - } - - OutputMode outputMode = OutputMode.DEFAULT; - if (cli.hasOption("s")) { - outputMode = OutputMode.SILENT; - } - if (cli.hasOption("v")) { - outputMode = OutputMode.VERBOSE; - } - - return new Remove(terminal, outputMode, args[0]); - } - - private OutputMode outputMode; - final String pluginName; - - Remove(Terminal terminal, OutputMode outputMode, String pluginToRemove) { - super(terminal); - this.outputMode = outputMode; - this.pluginName = pluginToRemove; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - - PluginManager pluginManager = new PluginManager(env, null, outputMode, DEFAULT_TIMEOUT); - terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "..."); - pluginManager.removePlugin(pluginName, terminal); - return ExitStatus.OK; - } - } - - /** - * Installs a plugin - */ - static class Install extends Command { - - private static final String NAME = "install"; - - 
private static final CliToolConfig.Cmd CMD = cmd(NAME, Install.class) - .options(option("t", "timeout").required(false).hasArg(false)) - .options(option("b", "batch").required(false)) - .build(); - - static Command parse(Terminal terminal, CommandLine cli) { - String[] args = cli.getArgs(); - - // install [plugin-name/url] - if ((args == null) || (args.length == 0)) { - return exitCmd(ExitStatus.USAGE, terminal, "plugin name or url is missing (type -h for help)"); - } - String name = args[0]; - - URL optionalPluginUrl = null; - // try parsing cli argument as URL - try { - optionalPluginUrl = new URL(name); - name = null; - } catch (MalformedURLException e) { - // we tried to parse the cli argument as url and failed - // continue treating it as a symbolic plugin name like `analysis-icu` etc. - } - - TimeValue timeout = TimeValue.parseTimeValue(cli.getOptionValue("t"), DEFAULT_TIMEOUT, "cli"); - - OutputMode outputMode = OutputMode.DEFAULT; - if (cli.hasOption("s")) { - outputMode = OutputMode.SILENT; - } - if (cli.hasOption("v")) { - outputMode = OutputMode.VERBOSE; - } - - boolean batch = System.console() == null; - if (cli.hasOption("b")) { - batch = true; - } - - return new Install(terminal, name, outputMode, optionalPluginUrl, timeout, batch); - } - - final String name; - private OutputMode outputMode; - final URL url; - final TimeValue timeout; - final boolean batch; - - Install(Terminal terminal, String name, OutputMode outputMode, URL url, TimeValue timeout, boolean batch) { - super(terminal); - this.name = name; - this.outputMode = outputMode; - this.url = url; - this.timeout = timeout; - this.batch = batch; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - PluginManager pluginManager = new PluginManager(env, url, outputMode, timeout); - if (name != null) { - terminal.println("-> Installing " + Strings.coalesceToEmpty(name) + "..."); - } else { - terminal.println("-> Installing from " + URLDecoder.decode(url.toString(), "UTF-8") + "..."); - } - pluginManager.downloadAndExtract(name, terminal, batch); - return ExitStatus.OK; - } - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java index fd7f2d84e21..9bbafa6e16a 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java @@ -19,6 +19,7 @@ package org.elasticsearch.plugins; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.cli.Terminal.Verbosity; import org.elasticsearch.env.Environment; @@ -38,7 +39,7 @@ import java.util.Comparator; import java.util.List; class PluginSecurity { - + /** * Reads plugin policy, prints/confirms exceptions */ @@ -49,7 +50,7 @@ class PluginSecurity { terminal.print(Verbosity.VERBOSE, "plugin has a policy file with no additional permissions"); return; } - + // sort permissions in a reasonable order Collections.sort(requested, new Comparator() { @Override @@ -80,13 +81,13 @@ class PluginSecurity { return cmp; } }); - + terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); terminal.println(Verbosity.NORMAL, "@ WARNING: plugin requires additional permissions @"); terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); // print all permissions: for (Permission permission : requested) { - 
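/* Each pass through the loop below prints one requested permission in the shape
   formatPermission produces further down: type, then name, then actions when
   present. Illustrative output (not captured from a real run):
       * java.lang.RuntimePermission setContextClassLoader
       * java.net.SocketPermission * connect,resolve
*/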
terminal.println(Verbosity.NORMAL, "* %s", formatPermission(permission)); + terminal.println(Verbosity.NORMAL, "* " + formatPermission(permission)); } terminal.println(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html"); terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks."); @@ -98,11 +99,11 @@ class PluginSecurity { } } } - + /** Format permission type, name, and actions into a string */ static String formatPermission(Permission permission) { StringBuilder sb = new StringBuilder(); - + String clazz = null; if (permission instanceof UnresolvedPermission) { clazz = ((UnresolvedPermission) permission).getUnresolvedType(); @@ -110,7 +111,7 @@ class PluginSecurity { clazz = permission.getClass().getName(); } sb.append(clazz); - + String name = null; if (permission instanceof UnresolvedPermission) { name = ((UnresolvedPermission) permission).getUnresolvedName(); @@ -121,7 +122,7 @@ class PluginSecurity { sb.append(' '); sb.append(name); } - + String actions = null; if (permission instanceof UnresolvedPermission) { actions = ((UnresolvedPermission) permission).getUnresolvedActions(); @@ -134,7 +135,7 @@ class PluginSecurity { } return sb.toString(); } - + /** * Parses plugin policy into a set of permissions */ @@ -151,8 +152,8 @@ class PluginSecurity { } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } - PluginManager.tryToDeletePath(terminal, emptyPolicyFile); - + IOUtils.rm(emptyPolicyFile); + // parse the plugin's policy file into a set of permissions final Policy policy; try { diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java new file mode 100644 index 00000000000..8ce1056bbfd --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.CliTool; +import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; + +/** + * A command for the plugin cli to remove a plugin from elasticsearch. 
+ */ +class RemovePluginCommand extends CliTool.Command { + private final String pluginName; + + public RemovePluginCommand(Terminal terminal, String pluginName) { + super(terminal); + this.pluginName = pluginName; + } + + @Override + public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "..."); + + Path pluginDir = env.pluginsFile().resolve(pluginName); + if (Files.exists(pluginDir) == false) { + throw new UserError(CliTool.ExitStatus.USAGE, "Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins."); + } + + List pluginPaths = new ArrayList<>(); + + Path pluginBinDir = env.binFile().resolve(pluginName); + if (Files.exists(pluginBinDir)) { + if (Files.isDirectory(pluginBinDir) == false) { + throw new UserError(CliTool.ExitStatus.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); + } + pluginPaths.add(pluginBinDir); + terminal.println(VERBOSE, "Removing: " + pluginBinDir); + } + + terminal.println(VERBOSE, "Removing: " + pluginDir); + Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName); + Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE); + pluginPaths.add(tmpPluginDir); + + IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()])); + + return CliTool.ExitStatus.OK; + } +} diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java index badccbb9579..a96ed3d6424 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java @@ -20,13 +20,9 @@ package org.elasticsearch.rest.action.ingest; import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.WritePipelineResponse; -import org.elasticsearch.action.ingest.WritePipelineResponseRestListener; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; @@ -34,7 +30,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.support.AcknowledgedRestListener; import org.elasticsearch.rest.action.support.RestActions; -import java.io.IOException; public class RestPutPipelineAction extends BaseRestHandler { @@ -49,7 +44,7 @@ public class RestPutPipelineAction extends BaseRestHandler { PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), RestActions.getRestContent(restRequest)); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); - client.admin().cluster().putPipeline(request, new WritePipelineResponseRestListener(channel)); + client.admin().cluster().putPipeline(request, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java index 94f80a9b611..fc2e834ea75 100644 --- 
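The new RemovePluginCommand above leans on one ordering guarantee: the plugin directory is first renamed to a hidden ".removing-" name with an atomic move, and only then deleted, so a crash mid-removal never leaves a half-deleted plugin visible under its real name. A self-contained sketch of that pattern, with a plain recursive delete standing in for Lucene's IOUtils.rm and the paths chosen for illustration:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SafeRemove {
    static void removePlugin(Path pluginsDir, String name) throws IOException {
        Path pluginDir = pluginsDir.resolve(name);
        if (Files.exists(pluginDir) == false) {
            throw new IllegalArgumentException("Plugin " + name + " not found");
        }
        // atomic rename first: after this point the plugin is invisible under
        // its real name even if the JVM dies before the delete finishes
        Path tmp = pluginsDir.resolve(".removing-" + name);
        Files.move(pluginDir, tmp, StandardCopyOption.ATOMIC_MOVE);
        // recursive delete, children before parents
        try (Stream<Path> walk = Files.walk(tmp)) {
            List<Path> paths = walk.sorted(Comparator.reverseOrder()).collect(Collectors.toList());
            for (Path p : paths) {
                Files.delete(p);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path plugins = Files.createTempDirectory("plugins");
        Files.createDirectories(plugins.resolve("demo-plugin").resolve("bin"));
        removePlugin(plugins, "demo-plugin");
        System.out.println("removed: " + Files.notExists(plugins.resolve("demo-plugin")));
    }
}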
a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java @@ -47,6 +47,6 @@ public class RestSimulatePipelineAction extends BaseRestHandler { SimulatePipelineRequest request = new SimulatePipelineRequest(RestActions.getRestContent(restRequest)); request.setId(restRequest.param("id")); request.setVerbose(restRequest.paramAsBoolean("verbose", false)); - client.admin().cluster().simulatePipeline(request, new RestStatusToXContentListener<>(channel)); + client.admin().cluster().simulatePipeline(request, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/support/RestUtils.java b/core/src/main/java/org/elasticsearch/rest/support/RestUtils.java index 56bb18d5e6e..167e858c1df 100644 --- a/core/src/main/java/org/elasticsearch/rest/support/RestUtils.java +++ b/core/src/main/java/org/elasticsearch/rest/support/RestUtils.java @@ -57,7 +57,7 @@ public class RestUtils { if (fromIndex >= s.length()) { return; } - + int queryStringLength = s.contains("#") ? s.indexOf("#") : s.length(); String name = null; diff --git a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java index 8c042cd1937..69fc73e4af0 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.function.Supplier; /** * Base class for delegating transport response to a transport channel @@ -30,7 +31,7 @@ import java.io.IOException; public abstract class TransportChannelResponseHandler implements TransportResponseHandler { /** - * Convenience method for delegating an empty response to the provided changed + * Convenience method for delegating an empty response to the provided transport channel */ public static TransportChannelResponseHandler emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { return new TransportChannelResponseHandler(logger, channel, extraInfoOnError) { @@ -41,6 +42,19 @@ public abstract class TransportChannelResponseHandler TransportChannelResponseHandler responseHandler(ESLogger logger, Supplier responseSupplier, TransportChannel channel, String extraInfoOnError) { + return new TransportChannelResponseHandler(logger, channel, extraInfoOnError) { + @Override + public T newInstance() { + return responseSupplier.get(); + } + }; + } + + private final ESLogger logger; private final TransportChannel channel; private final String extraInfoOnError; diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index a6a1cab4f05..b050b2cb71f 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Scope; import org.elasticsearch.common.settings.Settings; import 
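/* Aside on the TransportChannelResponseHandler hunk above: the new static
   responseHandler(...) factory lets callers pass a Supplier instead of
   subclassing to override newInstance(). A call site would read roughly as
   follows (sketch; MyResponse and the logger are illustrative placeholders):

       TransportChannelResponseHandler<MyResponse> handler =
           TransportChannelResponseHandler.responseHandler(logger, MyResponse::new, channel, "error sending response");
*/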
org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -39,8 +40,8 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -56,6 +57,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.function.Supplier; +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.settings.Setting.listSetting; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** @@ -92,9 +95,10 @@ public class TransportService extends AbstractLifecycleComponent> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER); - public static final Setting> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Setting.Scope.CLUSTER); - + public static final Setting> TRACE_LOG_INCLUDE_SETTING = listSetting("transport.tracer.include", emptyList(), + Function.identity(), true, Scope.CLUSTER); + public static final Setting> TRACE_LOG_EXCLUDE_SETTING = listSetting("transport.tracer.exclude", + Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Scope.CLUSTER); private final ESLogger tracerLog; @@ -757,7 +761,8 @@ public class TransportService extends AbstractLifecycleComponent> HOST = Setting.listSetting("transport.host", emptyList(), s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> PUBLISH_HOST = Setting.listSetting("transport.publish_host", HOST, s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> BIND_HOST = Setting.listSetting("transport.bind_host", HOST, s -> s, false, Setting.Scope.CLUSTER); - public static final Setting PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Setting.Scope.CLUSTER); - public static final Setting PUBLISH_PORT = Setting.intSetting("transport.publish_port", -1, -1, false, Setting.Scope.CLUSTER); + public static final Setting> HOST = listSetting("transport.host", emptyList(), s -> s, false, Scope.CLUSTER); + public static final Setting> PUBLISH_HOST = listSetting("transport.publish_host", HOST, s -> s, false, Scope.CLUSTER); + public static final Setting> BIND_HOST = listSetting("transport.bind_host", HOST, s -> s, false, Scope.CLUSTER); + public static final Setting PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Scope.CLUSTER); + public static final Setting PUBLISH_PORT = intSetting("transport.publish_port", -1, -1, false, Scope.CLUSTER); public static final String DEFAULT_PROFILE = "default"; - public static final Setting TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER); + public static final Setting TRANSPORT_PROFILES_SETTING = groupSetting("transport.profiles.", true, Scope.CLUSTER); private TransportSettings() { diff --git 
a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java index 7a41bf626c6..a5db72b9b5f 100644 --- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java @@ -97,7 +97,8 @@ public class LocalTransport extends AbstractLifecycleComponent implem int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1); logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize); final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX); - this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory, threadPool.getThreadContext()); + this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory, + threadPool.getThreadContext()); this.namedWriteableRegistry = namedWriteableRegistry; } @@ -199,7 +200,8 @@ public class LocalTransport extends AbstractLifecycleComponent implem } @Override - public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { + public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, + TransportRequestOptions options) throws IOException, TransportException { final Version version = Version.smallest(node.version(), this.version); try (BytesStreamOutput stream = new BytesStreamOutput()) { @@ -237,7 +239,8 @@ public class LocalTransport extends AbstractLifecycleComponent implem return this.workers; } - protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version, @Nullable final Long sendRequestId) { + protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version, + @Nullable final Long sendRequestId) { Transports.assertTransportThread(); try { transportServiceAdapter.received(data.length); @@ -278,7 +281,8 @@ public class LocalTransport extends AbstractLifecycleComponent implem stream = new NamedWriteableAwareStreamInput(stream, namedWriteableRegistry); final String action = stream.readString(); transportServiceAdapter.onRequestReceived(requestId, action); - final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action, requestId, version); + final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action, + requestId, version); try { final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); if (reg == null) { @@ -334,7 +338,8 @@ public class LocalTransport extends AbstractLifecycleComponent implem try { response.readFrom(buffer); } catch (Throwable e) { - handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); + handleException(handler, new TransportSerializationException( + "Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); return; } handleParsedResponse(response, handler); diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java 
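LocalTransport above sizes a fixed worker pool from settings and hands every received message to it. A rough stand-alone equivalent of that executor setup, with plain JDK types in place of EsExecutors and the settings machinery (the pool name, workerCount, and queueSize values are illustrative):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class LocalWorkers {
    static ThreadPoolExecutor newFixed(String name, int workerCount, int queueSize) {
        // queueSize < 0 means unbounded, mirroring the -1 default read from
        // TRANSPORT_LOCAL_QUEUE above
        BlockingQueue<Runnable> queue =
            queueSize < 0 ? new LinkedBlockingQueue<>() : new ArrayBlockingQueue<>(queueSize);
        return new ThreadPoolExecutor(workerCount, workerCount, 0L, TimeUnit.MILLISECONDS, queue,
            r -> {
                Thread t = new Thread(r, name + "-worker");
                t.setDaemon(true); // daemonThreadFactory equivalent
                return t;
            });
    }

    public static void main(String[] args) {
        ThreadPoolExecutor workers = newFixed("local_transport", 4, -1);
        workers.execute(() -> System.out.println("message handled on " + Thread.currentThread().getName()));
        workers.shutdown();
    }
}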
index aad31fd8ccd..41eb7354098 100644 --- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java +++ b/core/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java @@ -46,7 +46,8 @@ public class LocalTransportChannel implements TransportChannel { private final long requestId; private final Version version; - public LocalTransportChannel(LocalTransport sourceTransport, TransportServiceAdapter sourceTransportServiceAdapter, LocalTransport targetTransport, String action, long requestId, Version version) { + public LocalTransportChannel(LocalTransport sourceTransport, TransportServiceAdapter sourceTransportServiceAdapter, + LocalTransport targetTransport, String action, long requestId, Version version) { this.sourceTransport = sourceTransport; this.sourceTransportServiceAdapter = sourceTransportServiceAdapter; this.targetTransport = targetTransport; @@ -94,7 +95,8 @@ public class LocalTransportChannel implements TransportChannel { public void sendResponse(Throwable error) throws IOException { BytesStreamOutput stream = new BytesStreamOutput(); writeResponseExceptionHeader(stream); - RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddresses()[0], action, error); + RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), + targetTransport.boundAddress().boundAddresses()[0], action, error); stream.writeThrowable(tx); final byte[] data = stream.bytes().toBytes(); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index 6732b26ddbb..fca979f9bc9 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -116,7 +116,9 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { } catch (NotCompressedException ex) { int maxToRead = Math.min(buffer.readableBytes(), 10); int offset = buffer.readerIndex(); - StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes()).append("] readable bytes with message size [").append(size).append("] ").append("] are ["); + StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) + .append("] content bytes out of [").append(buffer.readableBytes()) + .append("] readable bytes with message size [").append(size).append("] ").append("] are ["); for (int i = 0; i < maxToRead; i++) { sb.append(buffer.getByte(offset + i)).append(","); } @@ -134,15 +136,17 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { final int nextByte = streamIn.read(); // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker if (nextByte != -1) { - throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" - + action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting"); + throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action + + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting"); } if (buffer.readerIndex() < 
expectedIndexReader) { - throw new IllegalStateException("Message is fully read (request), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting"); + throw new IllegalStateException("Message is fully read (request), yet there are " + + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting"); } if (buffer.readerIndex() > expectedIndexReader) { - throw new IllegalStateException("Message read past expected size (request) for requestId [" + requestId + "], action [" - + action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting"); + throw new IllegalStateException( + "Message read past expected size (request) for requestId [" + requestId + "], action [" + action + + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting"); } } else { @@ -163,11 +167,12 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); } if (buffer.readerIndex() < expectedIndexReader) { - throw new IllegalStateException("Message is fully read (response), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting"); + throw new IllegalStateException("Message is fully read (response), yet there are " + + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting"); } if (buffer.readerIndex() > expectedIndexReader) { - throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId + "], handler [" - + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); + throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId + + "], handler [" + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); } } @@ -193,7 +198,8 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { try { response.readFrom(buffer); } catch (Throwable e) { - handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); + handleException(handler, new TransportSerializationException( + "Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); return; } try { @@ -247,7 +253,8 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { buffer = new NamedWriteableAwareStreamInput(buffer, transport.namedWriteableRegistry); final String action = buffer.readString(); transportServiceAdapter.onRequestReceived(requestId, action); - final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, profileName); + final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, + requestId, version, profileName); try { final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); if (reg == null) { diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 99fbac17b69..8b174ecb19c 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.network.NetworkService; import 
org.elasticsearch.common.network.NetworkService.TcpSettings; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Scope; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -119,6 +120,10 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.byteSizeSetting; +import static org.elasticsearch.common.settings.Setting.intSetting; +import static org.elasticsearch.common.settings.Setting.timeSetting; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException; @@ -143,21 +148,33 @@ public class NettyTransport extends AbstractLifecycleComponent implem public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), - false, Setting.Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_RECOVERY = Setting.intSetting("transport.connections_per_node.recovery", 2, 1, false, Setting.Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_BULK = Setting.intSetting("transport.connections_per_node.bulk", 3, 1, false, Setting.Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_REG = Setting.intSetting("transport.connections_per_node.reg", 6, 1, false, Setting.Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_STATE = Setting.intSetting("transport.connections_per_node.state", 1, 1, false, Setting.Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_PING = Setting.intSetting("transport.connections_per_node.ping", 1, 1, false, Setting.Scope.CLUSTER); + (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), false, Setting.Scope.CLUSTER); + public static final Setting CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, false, + Scope.CLUSTER); + public static final Setting CONNECTIONS_PER_NODE_BULK = intSetting("transport.connections_per_node.bulk", 3, 1, false, + Scope.CLUSTER); + public static final Setting CONNECTIONS_PER_NODE_REG = intSetting("transport.connections_per_node.reg", 6, 1, false, + Scope.CLUSTER); + public static final Setting CONNECTIONS_PER_NODE_STATE = intSetting("transport.connections_per_node.state", 1, 1, false, + Scope.CLUSTER); + public static final Setting CONNECTIONS_PER_NODE_PING = intSetting("transport.connections_per_node.ping", 1, 1, false, + Scope.CLUSTER); // the scheduled internal ping interval setting, defaults to disabled (-1) - public static final Setting PING_SCHEDULE = Setting.timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_CLIENT = Setting.boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, 
false, Setting.Scope.CLUSTER); - public static final Setting TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER); - public static final Setting TCP_NO_DELAY = Setting.boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, Setting.Scope.CLUSTER); - public static final Setting TCP_KEEP_ALIVE = Setting.boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_SERVER = Setting.boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, false, Setting.Scope.CLUSTER); - public static final Setting TCP_REUSE_ADDRESS = Setting.boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, false, Setting.Scope.CLUSTER); + public static final Setting PING_SCHEDULE = timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, + Setting.Scope.CLUSTER); + public static final Setting TCP_BLOCKING_CLIENT = boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, + false, Setting.Scope.CLUSTER); + public static final Setting TCP_CONNECT_TIMEOUT = timeSetting("transport.tcp.connect_timeout", + TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER); + public static final Setting TCP_NO_DELAY = boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, + Setting.Scope.CLUSTER); + public static final Setting TCP_KEEP_ALIVE = boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, + Setting.Scope.CLUSTER); + public static final Setting TCP_BLOCKING_SERVER = boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, + false, Setting.Scope.CLUSTER); + public static final Setting TCP_REUSE_ADDRESS = boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, + false, Setting.Scope.CLUSTER); public static final Setting TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); public static final Setting TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER); @@ -165,9 +182,9 @@ public class NettyTransport extends AbstractLifecycleComponent implem public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); public static final Setting NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER); - // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one - public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting("transport.netty.receive_predictor_size", + public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( + "transport.netty.receive_predictor_size", settings -> { long defaultReceiverPredictor = 512 * 1024; if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { @@ -177,10 +194,11 @@ public class NettyTransport extends AbstractLifecycleComponent implem } return new ByteSizeValue(defaultReceiverPredictor).toString(); }, false, Setting.Scope.CLUSTER); - public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = 
Setting.byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = Setting.byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting NETTY_BOSS_COUNT = Setting.intSetting("transport.netty.boss_count", 1, 1, false, Setting.Scope.CLUSTER); - + public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("transport.netty.receive_predictor_min", + NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER); + public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("transport.netty.receive_predictor_max", + NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER); + public static final Setting NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, false, Scope.CLUSTER); protected final NetworkService networkService; protected final Version version; @@ -226,7 +244,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem final ScheduledPing scheduledPing; @Inject - public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version, NamedWriteableRegistry namedWriteableRegistry) { + public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version, + NamedWriteableRegistry namedWriteableRegistry) { super(settings); this.threadPool = threadPool; this.networkService = networkService; @@ -252,7 +271,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) { receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes()); } else { - receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes()); + receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), + (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes()); } this.scheduledPing = new ScheduledPing(); @@ -305,7 +325,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem String name = entry.getKey(); if (!Strings.hasLength(name)) { - logger.info("transport profile configured without a name. skipping profile with settings [{}]", profileSettings.toDelimitedString(',')); + logger.info("transport profile configured without a name. 
skipping profile with settings [{}]", + profileSettings.toDelimitedString(',')); continue; } else if (TransportSettings.DEFAULT_PROFILE.equals(name)) { profileSettings = settingsBuilder() @@ -345,13 +366,16 @@ public class NettyTransport extends AbstractLifecycleComponent implem private ClientBootstrap createClientBootstrap() { if (blockingClient) { - clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory(Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)))); + clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)))); } else { int bossCount = NETTY_BOSS_COUNT.get(settings); - clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory( - Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)), - bossCount, - new NioWorkerPool(Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount), + clientBootstrap = new ClientBootstrap( + new NioClientSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)), + bossCount, + new NioWorkerPool(Executors.newCachedThreadPool( + daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount), new HashedWheelTimer(daemonThreadFactory(settings, "transport_client_timer")))); } clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory()); @@ -403,12 +427,14 @@ public class NettyTransport extends AbstractLifecycleComponent implem boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings)); fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress); - ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE.get(settings)); + ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", + TCP_SEND_BUFFER_SIZE.get(settings)); if (fallbackTcpSendBufferSize.bytes() >= 0) { fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize); } - ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings)); + ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", + TCP_RECEIVE_BUFFER_SIZE.get(settings)); if (fallbackTcpBufferSize.bytes() >= 0) { fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize); } @@ -485,7 +511,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem return boundSocket.get(); } - private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings, List boundAddresses) { + private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings, + List boundAddresses) { String[] boundAddressesHostStrings = new String[boundAddresses.size()]; TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; for (int i = 0; i < boundAddresses.size(); i++) { @@ -531,7 +558,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem // TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address // In 
case of a custom profile, we might use the publish address of the default profile publishPort = boundAddresses.get(0).getPort(); - logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort); + logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], " + + "falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort); } final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); @@ -549,8 +577,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings); ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings); - logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", - name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, receivePredictorMax); + if (logger.isDebugEnabled()) { + logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " + + "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", + name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, + connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, + receivePredictorMax); + } final ThreadFactory bossFactory = daemonThreadFactory(this.settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, name); final ThreadFactory workerFactory = daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, name); @@ -739,7 +772,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem return; } if (isCloseConnectionException(e.getCause())) { - logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); + logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), + ctx.getChannel()); // close the channel, which will cause a node to be disconnected if relevant ctx.getChannel().close(); disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); @@ -754,7 +788,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem ctx.getChannel().close(); disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); } else if (e.getCause() instanceof CancelledKeyException) { - logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); + logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), + ctx.getChannel()); // close the channel as safe measure, which will cause a node to be disconnected if relevant ctx.getChannel().close(); disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); @@ -800,7 +835,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem } @Override - public void sendRequest(final DiscoveryNode node, final long requestId, final String 
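/* On the publish-port fallback warned about just above: the surrounding code
   first tries to reuse the port of a bound address matching the publish
   address; only when no match exists does it fall back to the first bound
   address's port. A sketch of that resolution under assumed types (plain
   InetSocketAddress instead of BoundTransportAddress):

       int publishPort = boundAddresses.stream()
           .filter(bound -> bound.getAddress().equals(publishInetAddress))
           .mapToInt(InetSocketAddress::getPort)
           .findFirst()
           .orElse(boundAddresses.get(0).getPort());
*/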
action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { + public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, + TransportRequestOptions options) throws IOException, TransportException { Channel targetChannel = nodeChannel(node, options); @@ -902,7 +938,9 @@ public class NettyTransport extends AbstractLifecycleComponent implem if (light) { nodeChannels = connectToChannelsLight(node); } else { - nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk], new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState], new Channel[connectionsPerNodePing]); + nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk], + new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState], + new Channel[connectionsPerNodePing]); try { connectToChannels(nodeChannels, node); } catch (Throwable e) { diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java index aaf33c2fd5a..c89523074dc 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java @@ -53,7 +53,8 @@ public class NettyTransportChannel implements TransportChannel { private final long requestId; private final String profileName; - public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel, long requestId, Version version, String profileName) { + public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel, + long requestId, Version version, String profileName) { this.transportServiceAdapter = transportServiceAdapter; this.version = version; this.transport = transport; @@ -119,7 +120,8 @@ public class NettyTransportChannel implements TransportChannel { public void sendResponse(Throwable error) throws IOException { BytesStreamOutput stream = new BytesStreamOutput(); stream.skip(NettyHeader.HEADER_SIZE); - RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), action, error); + RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), + action, error); stream.writeThrowable(tx); byte status = 0; status = TransportStatus.setResponse(status); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java b/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java index f38dc1dc02d..aab83d293d8 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java @@ -80,8 +80,8 @@ public class SizeHeaderFrameDecoder extends FrameDecoder { } // safety against too large frames being sent if (dataLen > NINETY_PER_HEAP_SIZE) { - throw new TooLongFrameException( - "transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); + throw new TooLongFrameException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" + + new 
ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); } if (buffer.readableBytes() < dataLen + 6) { diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 44d35305a60..88c4dc75222 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -20,6 +20,7 @@ package org.elasticsearch.tribe; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -83,8 +84,10 @@ import static java.util.Collections.unmodifiableMap; */ public class TribeService extends AbstractLifecycleComponent { - public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE)); - public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE)); + public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, + RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, + RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE)); public static Settings processSettings(Settings settings) { if (TRIBE_NAME_SETTING.exists(settings)) { @@ -106,7 +109,8 @@ public class TribeService extends AbstractLifecycleComponent { Settings.Builder sb = Settings.builder().put(settings); sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); // this node should just act as a node client sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery - sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); // nothing is going to be discovered, since no master will be elected + // nothing is going to be discovered, since no master will be elected + sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); if (sb.get("cluster.name") == null) { sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM } @@ -114,7 +118,8 @@ public class TribeService extends AbstractLifecycleComponent { return sb.build(); } - private static final Setting TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER); // internal settings only + // internal settings only + private static final Setting TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER); private final ClusterService clusterService; private final String[] blockIndicesWrite; private final String[] blockIndicesRead; @@ -125,14 +130,20 @@ public class TribeService extends AbstractLifecycleComponent { if (ON_CONFLICT_ANY.equals(s) || ON_CONFLICT_DROP.equals(s) || s.startsWith(ON_CONFLICT_PREFER)) { return s; } - throw new IllegalArgumentException("Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " +s); + throw new IllegalArgumentException( + "Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but 
was: " + s); }, false, Setting.Scope.CLUSTER); - public static final Setting BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false, Setting.Scope.CLUSTER); - public static final Setting BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false, Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false, + Setting.Scope.CLUSTER); + public static final Setting BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false, + Setting.Scope.CLUSTER); + public static final Setting> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices", + Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices", + Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices", + Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); private final String onConflict; private final Set droppedIndices = ConcurrentCollections.newConcurrentSet(); @@ -304,7 +315,8 @@ public class TribeService extends AbstractLifecycleComponent { tribeAttr.put(attr.key, attr.value); } tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName); - DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); + DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), + tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); clusterStateChanged = true; logger.info("[{}] adding node [{}]", tribeName, discoNode); nodes.put(discoNode); @@ -328,7 +340,8 @@ public class TribeService extends AbstractLifecycleComponent { // always make sure to update the metadata and routing table, in case // there are changes in them (new mapping, shards moving from initializing to started) routingTable.add(tribeState.routingTable().index(index.getIndex())); - Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build(); + Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()) + .put(TRIBE_NAME_SETTING.getKey(), tribeName).build(); metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); } } @@ -357,7 +370,8 @@ public class TribeService extends AbstractLifecycleComponent { } else if (ON_CONFLICT_DROP.equals(onConflict)) { // drop the indices, there is a conflict clusterStateChanged = true; - logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, 
tribeIndex.getIndex(), + existingFromTribe); removeIndex(blocks, metaData, routingTable, tribeIndex); droppedIndices.add(tribeIndex.getIndex().getName()); } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { @@ -366,7 +380,8 @@ public class TribeService extends AbstractLifecycleComponent { if (tribeName.equals(preferredTribeName)) { // the new one is hte preferred one, replace... clusterStateChanged = true; - logger.info("[{}] adding index {}, preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + logger.info("[{}] adding index {}, preferred over [{}]", tribeName, tribeIndex.getIndex(), + existingFromTribe); removeIndex(blocks, metaData, routingTable, tribeIndex); addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); } // else: either the existing one is the preferred one, or we haven't seen one, carry on @@ -378,17 +393,20 @@ public class TribeService extends AbstractLifecycleComponent { if (!clusterStateChanged) { return currentState; } else { - return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build(); + return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData) + .routingTable(routingTable.build()).build(); } } - private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { + private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, + IndexMetaData index) { metaData.remove(index.getIndex().getName()); routingTable.remove(index.getIndex().getName()); blocks.removeIndexBlocks(index.getIndex().getName()); } - private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { + private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, + RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build(); metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); diff --git a/core/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java b/core/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java index 5ff6525a428..7c1cd060952 100644 --- a/core/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java +++ b/core/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java @@ -83,7 +83,8 @@ public class ResourceWatcherService extends AbstractLifecycleComponent, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/core/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java b/core/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java index 8eb3f4ece75..3f252c37072 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java @@ -21,13 +21,11 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.ingest.core.PipelineFactoryError; import 
org.elasticsearch.test.ESTestCase; import java.io.IOException; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; public class WritePipelineResponseTests extends ESTestCase { @@ -45,17 +43,13 @@ public class WritePipelineResponseTests extends ESTestCase { } public void testSerializationWithError() throws IOException { - PipelineFactoryError error = new PipelineFactoryError("error"); - WritePipelineResponse response = new WritePipelineResponse(error); + WritePipelineResponse response = new WritePipelineResponse(); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = StreamInput.wrap(out.bytes()); WritePipelineResponse otherResponse = new WritePipelineResponse(); otherResponse.readFrom(streamInput); - assertThat(otherResponse.getError().getReason(), equalTo(response.getError().getReason())); - assertThat(otherResponse.getError().getProcessorType(), equalTo(response.getError().getProcessorType())); - assertThat(otherResponse.getError().getProcessorTag(), equalTo(response.getError().getProcessorTag())); - assertThat(otherResponse.getError().getProcessorPropertyName(), equalTo(response.getError().getProcessorPropertyName())); + assertThat(otherResponse.isAcknowledged(), equalTo(response.isAcknowledged())); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 49a5e072e1f..8e7b70a3b21 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -56,12 +56,12 @@ public class ClusterStateCreationUtils { /** * Creates cluster state with and index that has one shard and #(replicaStates) replicas * - * @param index name of the index - * @param primaryLocal if primary should coincide with the local node in the cluster state - * @param primaryState state of primary - * @param replicaStates states of the replicas. length of this array determines also the number of replicas + * @param index name of the index + * @param activePrimaryLocal if active primary should coincide with the local node in the cluster state + * @param primaryState state of primary + * @param replicaStates states of the replicas. length of this array determines also the number of replicas */ - public static ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) { + public static ClusterState state(String index, boolean activePrimaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) { final int numberOfReplicas = replicaStates.length; int numberOfNodes = numberOfReplicas + 1; @@ -97,7 +97,7 @@ public class ClusterStateCreationUtils { String relocatingNode = null; UnassignedInfo unassignedInfo = null; if (primaryState != ShardRoutingState.UNASSIGNED) { - if (primaryLocal) { + if (activePrimaryLocal) { primaryNode = newNode(0).id(); unassignedNodes.remove(primaryNode); } else { @@ -173,13 +173,13 @@ public class ClusterStateCreationUtils { * Creates cluster state with and index that has one shard and as many replicas as numberOfReplicas. * Primary will be STARTED in cluster state but replicas will be one of UNASSIGNED, INITIALIZING, STARTED or RELOCATING. 
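+     * (The primary here is randomly either STARTED or RELOCATING, which is what "active primary" in the new name refers to; callers must tolerate a primary that is mid-relocation.)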
     *
-     * @param index            name of the index
-     * @param primaryLocal     if primary should coincide with the local node in the cluster state
-     * @param numberOfReplicas number of replicas
+     * @param index              name of the index
+     * @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
+     * @param numberOfReplicas   number of replicas
      */
-    public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) {
+    public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int numberOfReplicas) {
         int assignedReplicas = randomIntBetween(0, numberOfReplicas);
-        return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
+        return stateWithActivePrimary(index, activePrimaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
     }

     /**
@@ -188,11 +188,11 @@ public class ClusterStateCreationUtils {
      * some (assignedReplicas) will be one of INITIALIZING, STARTED or RELOCATING.
      *
      * @param index name of the index
-     * @param primaryLocal if primary should coincide with the local node in the cluster state
+     * @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
      * @param assignedReplicas number of replicas that should have INITIALIZING, STARTED or RELOCATING state
      * @param unassignedReplicas number of replicas that should be unassigned
      */
-    public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) {
+    public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int assignedReplicas, int unassignedReplicas) {
         ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
         // no point in randomizing - node assignment later on does it too.
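+        // the first assignedReplicas slots get a random assigned state (INITIALIZING, STARTED or RELOCATING); the remaining slots are marked UNASSIGNED below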
for (int i = 0; i < assignedReplicas; i++) { @@ -201,7 +201,7 @@ public class ClusterStateCreationUtils { for (int i = assignedReplicas; i < replicaStates.length; i++) { replicaStates[i] = ShardRoutingState.UNASSIGNED; } - return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates); + return state(index, activePrimaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates); } /** diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 402a454649b..2e4e3cb475b 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -37,10 +37,13 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -52,6 +55,7 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; @@ -66,6 +70,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -75,9 +80,10 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary; +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.empty; @@ -203,6 +209,56 @@ public class TransportReplicationActionTests extends ESTestCase { assertIndexShardCounter(1); } + /** + * When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from + * the relocation source to the relocation target. 
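+     * (Replication requests record the cluster-state version they were routed on, see routedBasedOnClusterVersion in the test below, so a node whose local cluster state is older can recognise such a request as ahead of its own view and wait for the newer state instead of acting on it.)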
If relocation source receives and processes this cluster state + * before the relocation target, there is a time span where relocation source believes active primary to be on + * relocation target and relocation target believes active primary to be on relocation source. This results in replication + * requests being sent back and forth. + * + * This test checks that replication request is not routed back from relocation target to relocation source in case of + * stale index routing table on relocation target. + */ + public void testNoRerouteOnStaleClusterState() throws InterruptedException, ExecutionException { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + ClusterState state = state(index, true, ShardRoutingState.RELOCATING); + String relocationTargetNode = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId(); + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build(); + clusterService.setState(state); + logger.debug("--> relocation ongoing state:\n{}", clusterService.state().prettyPrint()); + + Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1); + PlainActionFuture listener = new PlainActionFuture<>(); + TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener); + reroutePhase.run(); + assertListenerThrows("cluster state too old didn't cause a timeout", listener, UnavailableShardsException.class); + + request = new Request(shardId).routedBasedOnClusterVersion(clusterService.state().version() + 1); + listener = new PlainActionFuture<>(); + reroutePhase = action.new ReroutePhase(null, request, listener); + reroutePhase.run(); + assertFalse("cluster state too old didn't cause a retry", listener.isDone()); + + // finish relocation + ShardRouting relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId).shardsWithState(ShardRoutingState.INITIALIZING).get(0); + AllocationService allocationService = ESAllocationTestCase.createAllocationService(); + RoutingAllocation.Result result = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget)); + ClusterState updatedState = ClusterState.builder(clusterService.state()).routingResult(result).build(); + + clusterService.setState(updatedState); + logger.debug("--> relocation complete state:\n{}", clusterService.state().prettyPrint()); + + IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); + final List capturedRequests = + transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); + assertThat(capturedRequests, notNullValue()); + assertThat(capturedRequests.size(), equalTo(1)); + assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); + assertIndexShardCounter(1); + } + public void testUnknownIndexOrShardOnReroute() throws InterruptedException { final String index = "test"; // no replicas in oder to skip the replication part @@ -225,7 +281,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); - clusterService.setState(stateWithStartedPrimary(index, randomBoolean(), 3)); + clusterService.setState(stateWithActivePrimary(index, randomBoolean(), 3)); logger.debug("using state: \n{}", 
clusterService.state().prettyPrint()); @@ -249,33 +305,73 @@ public class TransportReplicationActionTests extends ESTestCase { assertIndexShardUninitialized(); } - public void testPrimaryPhaseExecutesRequest() throws InterruptedException, ExecutionException { + public void testPrimaryPhaseExecutesOrDelegatesRequestToRelocationTarget() throws InterruptedException, ExecutionException { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); - clusterService.setState(state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); + clusterService.setState(state); Request request = new Request(shardId).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); - TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)); + AtomicBoolean movedToReplication = new AtomicBoolean(); + TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) { + @Override + void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) { + super.finishAndMoveToReplication(replicationPhase); + movedToReplication.set(true); + } + }; + ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + boolean executeOnPrimary = true; + if (primaryShard.relocating() && randomBoolean()) { // whether shard has been marked as relocated already (i.e. relocation completed) + isRelocated.set(true); + indexShardRouting.set(primaryShard); + executeOnPrimary = false; + } primaryPhase.run(); - assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); - final String replicaNodeId = clusterService.state().getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0).currentNodeId(); - final List requests = transport.getCapturedRequestsByTargetNodeAndClear().get(replicaNodeId); - assertThat(requests, notNullValue()); - assertThat(requests.size(), equalTo(1)); - assertThat("replica request was not sent", requests.get(0).action, equalTo("testAction[r]")); + assertThat(request.processedOnPrimary.get(), equalTo(executeOnPrimary)); + assertThat(movedToReplication.get(), equalTo(executeOnPrimary)); + if (executeOnPrimary == false) { + final List requests = transport.capturedRequestsByTargetNode().get(primaryShard.relocatingNodeId()); + assertThat(requests, notNullValue()); + assertThat(requests.size(), equalTo(1)); + assertThat("primary request was not delegated to relocation target", requests.get(0).action, equalTo("testAction[p]")); + } + } + + public void testPrimaryPhaseExecutesDelegatedRequestOnRelocationTarget() throws InterruptedException, ExecutionException { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + ClusterState state = state(index, true, ShardRoutingState.RELOCATING); + String primaryTargetNodeId = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId(); + // simulate execution of the primary phase on the relocation target node + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryTargetNodeId)).build(); + clusterService.setState(state); + Request request = new Request(shardId).timeout("1ms"); + PlainActionFuture listener = new PlainActionFuture<>(); + AtomicBoolean movedToReplication = new AtomicBoolean(); + 
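+        // subclass PrimaryPhase so the test can observe, via movedToReplication, whether the request was handed off to the replication phase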
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) { + @Override + void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) { + super.finishAndMoveToReplication(replicationPhase); + movedToReplication.set(true); + } + }; + primaryPhase.run(); + assertThat("request was not processed on primary relocation target", request.processedOnPrimary.get(), equalTo(true)); + assertThat(movedToReplication.get(), equalTo(true)); } public void testAddedReplicaAfterPrimaryOperation() { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // start with no replicas - clusterService.setState(stateWithStartedPrimary(index, true, 0)); + clusterService.setState(stateWithActivePrimary(index, true, 0)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); final ClusterState stateWithAddedReplicas = state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED); final Action actionWithAddedReplicaAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override - protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable { + protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { final Tuple operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest); // add replicas after primary operation ((TestClusterService) clusterService).setState(stateWithAddedReplicas); @@ -302,13 +398,13 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // start with a replica - clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED)); + clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); final ClusterState stateWithRelocatingReplica = state(index, true, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); final Action actionWithRelocatingReplicasAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override - protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable { + protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { final Tuple operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest); // set replica to relocating ((TestClusterService) clusterService).setState(stateWithRelocatingReplica); @@ -341,7 +437,7 @@ public class TransportReplicationActionTests extends ESTestCase { final Action actionWithDeletedIndexAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override - protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable { + protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { final Tuple operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest); // delete index after primary op ((TestClusterService) clusterService).setState(stateWithDeletedIndex); @@ -432,7 +528,13 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); - clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); + ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); + ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + if (primaryShard.relocating() && randomBoolean()) { + // simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build(); + } + clusterService.setState(state); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); int assignedReplicas = 0; @@ -448,19 +550,26 @@ public class TransportReplicationActionTests extends ESTestCase { } } - runReplicateTest(shardRoutingTable, assignedReplicas, totalShards); + runReplicateTest(state, shardRoutingTable, assignedReplicas, totalShards); } public void testReplicationWithShadowIndex() throws ExecutionException, InterruptedException { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); - ClusterState state = stateWithStartedPrimary(index, true, randomInt(5)); + ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); MetaData.Builder metaData = MetaData.builder(state.metaData()); Settings.Builder settings = Settings.builder().put(metaData.get(index).getSettings()); settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true); metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings)); - clusterService.setState(ClusterState.builder(state).metaData(metaData)); + state = ClusterState.builder(state).metaData(metaData).build(); + + ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + if (primaryShard.relocating() && randomBoolean()) { + // simulate 
execution of the primary phase on the relocation target node + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build(); + } + clusterService.setState(state); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); int assignedReplicas = 0; @@ -472,18 +581,22 @@ public class TransportReplicationActionTests extends ESTestCase { totalShards++; } } - runReplicateTest(shardRoutingTable, assignedReplicas, totalShards); + runReplicateTest(state, shardRoutingTable, assignedReplicas, totalShards); } - protected void runReplicateTest(IndexShardRoutingTable shardRoutingTable, int assignedReplicas, int totalShards) throws InterruptedException, ExecutionException { + protected void runReplicateTest(ClusterState state, IndexShardRoutingTable shardRoutingTable, int assignedReplicas, int totalShards) throws InterruptedException, ExecutionException { final ShardIterator shardIt = shardRoutingTable.shardsIt(); final ShardId shardId = shardIt.shardId(); final Request request = new Request(shardId); final PlainActionFuture listener = new PlainActionFuture<>(); logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint()); - Releasable reference = getOrCreateIndexShardOperationsCounter(); + TransportReplicationAction.IndexShardReference reference = getOrCreateIndexShardOperationsCounter(); + + ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + indexShardRouting.set(primaryShard); + assertIndexShardCounter(2); // TODO: set a default timeout TransportReplicationAction.ReplicationPhase replicationPhase = @@ -507,8 +620,9 @@ public class TransportReplicationActionTests extends ESTestCase { assertEquals(request.shardId, replicationRequest.shardId); } + String localNodeId = clusterService.state().getNodes().localNodeId(); // no request was sent to the local node - assertThat(nodesSentTo.keySet(), not(hasItem(clusterService.state().getNodes().localNodeId()))); + assertThat(nodesSentTo.keySet(), not(hasItem(localNodeId))); // requests were sent to the correct shard copies for (ShardRouting shard : clusterService.state().getRoutingTable().shardRoutingTable(shardId)) { @@ -518,11 +632,11 @@ public class TransportReplicationActionTests extends ESTestCase { if (shard.unassigned()) { continue; } - if (shard.primary() == false) { - nodesSentTo.remove(shard.currentNodeId()); + if (localNodeId.equals(shard.currentNodeId()) == false) { + assertThat(nodesSentTo.remove(shard.currentNodeId()), notNullValue()); } - if (shard.relocating()) { - nodesSentTo.remove(shard.relocatingNodeId()); + if (shard.relocating() && localNodeId.equals(shard.relocatingNodeId()) == false) { // for relocating primaries, we replicate from target to source if source is marked as relocated + assertThat(nodesSentTo.remove(shard.relocatingNodeId()), notNullValue()); } } @@ -629,6 +743,7 @@ public class TransportReplicationActionTests extends ESTestCase { // shard operation should be ongoing, so the counter is at 2 // we have to wait here because increment happens in thread assertBusy(() -> assertIndexShardCounter(2)); + assertThat(transport.capturedRequests().length, equalTo(0)); ((ActionWithDelay) action).countDownLatch.countDown(); t.join(); @@ -644,6 +759,8 @@ public class TransportReplicationActionTests extends ESTestCase { // one replica to make sure replication is 
attempted clusterService.setState(state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard(); + indexShardRouting.set(primaryShard); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Request request = new Request(shardId).timeout("100ms"); PlainActionFuture listener = new PlainActionFuture<>(); @@ -726,12 +843,28 @@ public class TransportReplicationActionTests extends ESTestCase { private final AtomicInteger count = new AtomicInteger(0); + private final AtomicBoolean isRelocated = new AtomicBoolean(false); + + private final AtomicReference indexShardRouting = new AtomicReference<>(); + /* * Returns testIndexShardOperationsCounter or initializes it if it was already created in this test run. * */ - private synchronized Releasable getOrCreateIndexShardOperationsCounter() { + private synchronized TransportReplicationAction.IndexShardReference getOrCreateIndexShardOperationsCounter() { count.incrementAndGet(); - return new Releasable() { + return new TransportReplicationAction.IndexShardReference() { + @Override + public boolean isRelocated() { + return isRelocated.get(); + } + + @Override + public ShardRouting routingEntry() { + ShardRouting shardRouting = indexShardRouting.get(); + assert shardRouting != null; + return shardRouting; + } + @Override public void close() { count.decrementAndGet(); @@ -783,7 +916,7 @@ public class TransportReplicationActionTests extends ESTestCase { } @Override - protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable { + protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true); assert executedBefore == false : "request has already been executed on the primary"; return new Tuple<>(new Response(), shardRequest); @@ -805,7 +938,11 @@ public class TransportReplicationActionTests extends ESTestCase { } @Override - protected Releasable getIndexShardOperationsCounter(ShardId shardId) { + protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) { + return getOrCreateIndexShardOperationsCounter(); + } + + protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) { return getOrCreateIndexShardOperationsCounter(); } } @@ -832,7 +969,7 @@ public class TransportReplicationActionTests extends ESTestCase { } @Override - protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable { + protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) { return throwException(shardRequest.shardId()); } @@ -870,7 +1007,7 @@ public class TransportReplicationActionTests extends ESTestCase { } @Override - protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable { + protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { awaitLatch(); return new Tuple<>(new Response(), shardRequest); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index b5b1f955ae0..d23e11e2fb3 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -279,6 +279,7 
@@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/16373") public void testOldIndexes() throws Exception { setupCluster(); diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 4e8d1d9266d..3ad8d5013b2 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.action.shard; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -28,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardIterator; @@ -38,6 +40,9 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryService; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; @@ -45,12 +50,15 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.not; public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase { @@ -119,9 +127,25 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa tasks.addAll(failingTasks); tasks.addAll(nonExistentTasks); ClusterStateTaskExecutor.BatchResult result = failingExecutor.execute(currentState, tasks); - Map taskResultMap = - failingTasks.stream().collect(Collectors.toMap(Function.identity(), task -> false)); - taskResultMap.putAll(nonExistentTasks.stream().collect(Collectors.toMap(Function.identity(), task -> true))); + Map taskResultMap = + failingTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.failure(new RuntimeException("simulated applyFailedShards failure")))); + taskResultMap.putAll(nonExistentTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success()))); + assertTaskResults(taskResultMap, result, currentState, false); + } + + public void testIllegalShardFailureRequests() throws Exception { + String 
reason = "test illegal shard failure requests"; + ClusterState currentState = createClusterStateWithStartedShards(reason); + List failingTasks = createExistingShards(currentState, reason); + List tasks = new ArrayList<>(); + for (ShardStateAction.ShardRoutingEntry failingTask : failingTasks) { + tasks.add(new ShardStateAction.ShardRoutingEntry(failingTask.getShardRouting(), randomInvalidSourceShard(currentState, failingTask.getShardRouting()), failingTask.message, failingTask.failure)); + } + Map taskResultMap = + tasks.stream().collect(Collectors.toMap( + Function.identity(), + task -> ClusterStateTaskExecutor.TaskResult.failure(new ShardStateAction.NoLongerPrimaryShardException(task.getShardRouting().shardId(), "source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation")))); + ClusterStateTaskExecutor.BatchResult result = executor.execute(currentState, tasks); assertTaskResults(taskResultMap, result, currentState, false); } @@ -156,17 +180,22 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa for (int i = 0; i < numberOfTasks; i++) { shardsToFail.add(randomFrom(failures)); } - return toTasks(shardsToFail, indexUUID, reason); + return toTasks(currentState, shardsToFail, indexUUID, reason); } private List createNonExistentShards(ClusterState currentState, String reason) { // add shards from a non-existent index - MetaData nonExistentMetaData = - MetaData.builder() - .put(IndexMetaData.builder("non-existent").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(numberOfReplicas)) - .build(); - RoutingTable routingTable = RoutingTable.builder().addAsNew(nonExistentMetaData.index("non-existent")).build(); - String nonExistentIndexUUID = nonExistentMetaData.index("non-existent").getIndexUUID(); + String nonExistentIndexUUID = "non-existent"; + Index index = new Index("non-existent", nonExistentIndexUUID); + List nodeIds = new ArrayList<>(); + for (ObjectCursor nodeId : currentState.nodes().getNodes().keys()) { + nodeIds.add(nodeId.toString()); + } + List nonExistentShards = new ArrayList<>(); + nonExistentShards.add(nonExistentShardRouting(index, nodeIds, true)); + for (int i = 0; i < numberOfReplicas; i++) { + nonExistentShards.add(nonExistentShardRouting(index, nodeIds, false)); + } List existingShards = createExistingShards(currentState, reason); List shardsWithMismatchedAllocationIds = new ArrayList<>(); @@ -174,28 +203,32 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa ShardRouting sr = existingShard.getShardRouting(); ShardRouting nonExistentShardRouting = TestShardRouting.newShardRouting(sr.index(), sr.id(), sr.currentNodeId(), sr.relocatingNodeId(), sr.restoreSource(), sr.primary(), sr.state(), sr.version()); - shardsWithMismatchedAllocationIds.add(new ShardStateAction.ShardRoutingEntry(nonExistentShardRouting, existingShard.indexUUID, existingShard.message, existingShard.failure)); + shardsWithMismatchedAllocationIds.add(new ShardStateAction.ShardRoutingEntry(nonExistentShardRouting, nonExistentShardRouting, existingShard.message, existingShard.failure)); } List tasks = new ArrayList<>(); - tasks.addAll(toTasks(routingTable.allShards(), nonExistentIndexUUID, reason)); + nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.ShardRoutingEntry(shard, shard, reason, new CorruptIndexException("simulated", nonExistentIndexUUID)))); tasks.addAll(shardsWithMismatchedAllocationIds); return tasks; } + private ShardRouting 
nonExistentShardRouting(Index index, List nodeIds, boolean primary) { + return TestShardRouting.newShardRouting(index, 0, randomFrom(nodeIds), primary, randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.RELOCATING, ShardRoutingState.STARTED), randomIntBetween(1, 8)); + } + private static void assertTasksSuccessful( List tasks, ClusterStateTaskExecutor.BatchResult result, ClusterState clusterState, boolean clusterStateChanged ) { - Map taskResultMap = - tasks.stream().collect(Collectors.toMap(Function.identity(), task -> true)); + Map taskResultMap = + tasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success())); assertTaskResults(taskResultMap, result, clusterState, clusterStateChanged); } private static void assertTaskResults( - Map taskResultMap, + Map taskResultMap, ClusterStateTaskExecutor.BatchResult result, ClusterState clusterState, boolean clusterStateChanged @@ -203,24 +236,29 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa // there should be as many task results as tasks assertEquals(taskResultMap.size(), result.executionResults.size()); - for (Map.Entry entry : taskResultMap.entrySet()) { + for (Map.Entry entry : taskResultMap.entrySet()) { // every task should have a corresponding task result assertTrue(result.executionResults.containsKey(entry.getKey())); // the task results are as expected - assertEquals(entry.getValue(), result.executionResults.get(entry.getKey()).isSuccess()); + assertEquals(entry.getValue().isSuccess(), result.executionResults.get(entry.getKey()).isSuccess()); } - // every shard that we requested to be successfully failed is - // gone List shards = clusterState.getRoutingTable().allShards(); - for (Map.Entry entry : taskResultMap.entrySet()) { - if (entry.getValue()) { + for (Map.Entry entry : taskResultMap.entrySet()) { + if (entry.getValue().isSuccess()) { + // the shard was successfully failed and so should not + // be in the routing table for (ShardRouting shard : shards) { if (entry.getKey().getShardRouting().allocationId() != null) { assertThat(shard.allocationId(), not(equalTo(entry.getKey().getShardRouting().allocationId()))); } } + } else { + // check we saw the expected failure + ClusterStateTaskExecutor.TaskResult actualResult = result.executionResults.get(entry.getKey()); + assertThat(actualResult.getFailure(), instanceOf(entry.getValue().getFailure().getClass())); + assertThat(actualResult.getFailure().getMessage(), equalTo(entry.getValue().getFailure().getMessage())); } } @@ -231,11 +269,49 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa } } - private static List toTasks(List shards, String indexUUID, String message) { + private static List toTasks(ClusterState currentState, List shards, String indexUUID, String message) { return shards .stream() - .map(shard -> new ShardStateAction.ShardRoutingEntry(shard, indexUUID, message, new CorruptIndexException("simulated", indexUUID))) + .map(shard -> new ShardStateAction.ShardRoutingEntry(shard, randomValidSourceShard(currentState, shard), message, new CorruptIndexException("simulated", indexUUID))) .collect(Collectors.toList()); } + private static ShardRouting randomValidSourceShard(ClusterState currentState, ShardRouting shardRouting) { + // for the request node ID to be valid, either the request is + // from the node the shard is assigned to, or the request is + // from the node holding the primary shard + if (randomBoolean()) { + // request from local node + 
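+            // the failing shard's own routing entry (a request from the node the shard is assigned to) is always a valid source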
return shardRouting; + } else { + // request from primary node unless in the case of + // non-existent shards there is not one and we fallback to + // the local node + ShardRouting primaryNodeId = primaryShard(currentState, shardRouting); + return primaryNodeId != null ? primaryNodeId : shardRouting; + } + } + + private static ShardRouting randomInvalidSourceShard(ClusterState currentState, ShardRouting shardRouting) { + ShardRouting primaryShard = primaryShard(currentState, shardRouting); + Set shards = + currentState + .routingTable() + .allShards() + .stream() + .filter(shard -> !shard.isSameAllocation(shardRouting)) + .filter(shard -> !shard.isSameAllocation(primaryShard)) + .collect(Collectors.toSet()); + if (!shards.isEmpty()) { + return randomSubsetOf(1, shards.toArray(new ShardRouting[0])).get(0); + } else { + return + TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), DiscoveryService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values()), shardRouting.version()); + } + } + + private static ShardRouting primaryShard(ClusterState currentState, ShardRouting shardRouting) { + IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(shardRouting.shardId()); + return indexShard == null ? null : indexShard.primaryShard(); + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 8a13e6e6ddd..62f32e20fec 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.action.shard; import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ -29,11 +30,12 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; @@ -55,7 +57,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongConsumer; -import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -127,15 +128,13 @@ public class ShardStateActionTests extends ESTestCase { public void testSuccess() throws InterruptedException { final String index = "test"; - clusterService.setState(stateWithStartedPrimary(index, true, 
randomInt(5))); - - String indexUUID = clusterService.state().metaData().index(index).getIndexUUID(); + clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); AtomicBoolean success = new AtomicBoolean(); CountDownLatch latch = new CountDownLatch(1); ShardRouting shardRouting = getRandomShardRouting(index); - shardStateAction.shardFailed(shardRouting, indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + shardStateAction.shardFailed(shardRouting, shardRouting, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); @@ -169,21 +168,20 @@ public class ShardStateActionTests extends ESTestCase { public void testNoMaster() throws InterruptedException { final String index = "test"; - clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); + clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); DiscoveryNodes.Builder noMasterBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); noMasterBuilder.masterNodeId(null); clusterService.setState(ClusterState.builder(clusterService.state()).nodes(noMasterBuilder)); - String indexUUID = clusterService.state().metaData().index(index).getIndexUUID(); - CountDownLatch latch = new CountDownLatch(1); AtomicInteger retries = new AtomicInteger(); AtomicBoolean success = new AtomicBoolean(); setUpMasterRetryVerification(1, retries, latch, requestId -> {}); - shardStateAction.shardFailed(getRandomShardRouting(index), indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + ShardRouting failedShard = getRandomShardRouting(index); + shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); @@ -207,9 +205,7 @@ public class ShardStateActionTests extends ESTestCase { public void testMasterChannelException() throws InterruptedException { final String index = "test"; - clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); - - String indexUUID = clusterService.state().metaData().index(index).getIndexUUID(); + clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); CountDownLatch latch = new CountDownLatch(1); AtomicInteger retries = new AtomicInteger(); @@ -233,7 +229,8 @@ public class ShardStateActionTests extends ESTestCase { final int numberOfRetries = randomIntBetween(1, 256); setUpMasterRetryVerification(numberOfRetries, retries, latch, retryLoop); - shardStateAction.shardFailed(getRandomShardRouting(index), indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + ShardRouting failedShard = getRandomShardRouting(index); + shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); @@ -264,13 +261,12 @@ public class ShardStateActionTests extends ESTestCase { public void testUnhandledFailure() { final String index = "test"; - clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); - - String indexUUID = clusterService.state().metaData().index(index).getIndexUUID(); + clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); AtomicBoolean failure = new AtomicBoolean(); - shardStateAction.shardFailed(getRandomShardRouting(index), indexUUID, "test", 
getSimulatedFailure(), new ShardStateAction.Listener() { + ShardRouting failedShard = getRandomShardRouting(index); + shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { failure.set(false); @@ -294,9 +290,7 @@ public class ShardStateActionTests extends ESTestCase { public void testShardNotFound() throws InterruptedException { final String index = "test"; - clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); - - String indexUUID = clusterService.state().metaData().index(index).getIndexUUID(); + clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); AtomicBoolean success = new AtomicBoolean(); CountDownLatch latch = new CountDownLatch(1); @@ -304,7 +298,7 @@ public class ShardStateActionTests extends ESTestCase { ShardRouting failedShard = getRandomShardRouting(index); RoutingTable routingTable = RoutingTable.builder(clusterService.state().getRoutingTable()).remove(index).build(); clusterService.setState(ClusterState.builder(clusterService.state()).routingTable(routingTable)); - shardStateAction.shardFailed(failedShard, indexUUID, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); @@ -326,6 +320,44 @@ public class ShardStateActionTests extends ESTestCase { assertTrue(success.get()); } + public void testNoLongerPrimaryShardException() throws InterruptedException { + final String index = "test"; + + clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + + ShardRouting failedShard = getRandomShardRouting(index); + + String nodeId = randomFrom(clusterService.state().nodes().nodes().keys().toArray(String.class)); + + AtomicReference failure = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + + ShardRouting sourceFailedShard = TestShardRouting.newShardRouting(failedShard.index(), failedShard.id(), nodeId, randomBoolean(), randomFrom(ShardRoutingState.values()), failedShard.version()); + shardStateAction.shardFailed(failedShard, sourceFailedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + @Override + public void onSuccess() { + failure.set(null); + latch.countDown(); + } + + @Override + public void onFailure(Throwable t) { + failure.set(t); + latch.countDown(); + } + }); + + ShardStateAction.NoLongerPrimaryShardException catastrophicError = + new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "source shard [" + sourceFailedShard + " is neither the local allocation nor the primary allocation"); + CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); + transport.handleRemoteError(capturedRequests[0].requestId, catastrophicError); + + latch.await(); + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(ShardStateAction.NoLongerPrimaryShardException.class)); + assertThat(failure.get().getMessage(), equalTo(catastrophicError.getMessage())); + } + private ShardRouting getRandomShardRouting(String index) { IndexRoutingTable indexRoutingTable = clusterService.state().routingTable().index(index); ShardsIterator shardsIterator = indexRoutingTable.randomAllActiveShardsIt(); diff --git a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java 
b/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java index 259ee109f0f..3f5562fff61 100644 --- a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java +++ b/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java @@ -19,9 +19,6 @@ package org.elasticsearch.common.cli; -import java.nio.file.NoSuchFileException; -import java.util.List; - import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; @@ -46,22 +43,9 @@ public class TerminalTests extends CliToolTestCase { assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); } - public void testError() throws Exception { - try { - // actually throw so we have a stacktrace - throw new NoSuchFileException("/path/to/some/file"); - } catch (NoSuchFileException e) { - CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL); - terminal.printError(e); - List output = terminal.getTerminalOutput(); - assertFalse(output.isEmpty()); - assertTrue(output.get(0), output.get(0).contains("NoSuchFileException")); // exception class - assertTrue(output.get(0), output.get(0).contains("/path/to/some/file")); // message - assertEquals(1, output.size()); - - // TODO: we should test stack trace is printed in debug mode...except debug is a sysprop instead of - // a command line param...maybe it should be VERBOSE instead of a separate debug prop? - } + public void testEscaping() throws Exception { + CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL); + assertPrinted(terminal, Terminal.Verbosity.NORMAL, "This message contains percent like %20n"); } private void assertPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) { diff --git a/core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java b/core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java index 4f2b8f6811c..b6266773bf0 100644 --- a/core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java @@ -48,91 +48,6 @@ public class FileSystemUtilsTests extends ESTestCase { dst = createTempDir(); Files.createDirectories(src); Files.createDirectories(dst); - - // We first copy sources test files from src/test/resources - // Because after when the test runs, src files are moved to their destination - final Path path = getDataPath("/org/elasticsearch/common/io/copyappend"); - FileSystemUtils.copyDirectoryRecursively(path, src); - } - - public void testMoveOverExistingFileAndAppend() throws IOException { - - FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dst, ".new"); - assertFileContent(dst, "file1.txt", "version1"); - assertFileContent(dst, "dir/file2.txt", "version1"); - - FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dst, ".new"); - assertFileContent(dst, "file1.txt", "version1"); - assertFileContent(dst, "dir/file2.txt", "version1"); - assertFileContent(dst, "file1.txt.new", "version2"); - assertFileContent(dst, "dir/file2.txt.new", "version2"); - assertFileContent(dst, "file3.txt", "version1"); - assertFileContent(dst, "dir/subdir/file4.txt", "version1"); - - FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dst, ".new"); - assertFileContent(dst, "file1.txt", "version1"); - assertFileContent(dst, "dir/file2.txt", "version1"); - assertFileContent(dst, "file1.txt.new", "version3"); - assertFileContent(dst, "dir/file2.txt.new", "version3"); - assertFileContent(dst, "file3.txt", 
"version1"); - assertFileContent(dst, "dir/subdir/file4.txt", "version1"); - assertFileContent(dst, "file3.txt.new", "version2"); - assertFileContent(dst, "dir/subdir/file4.txt.new", "version2"); - assertFileContent(dst, "dir/subdir/file5.txt", "version1"); - } - - public void testMoveOverExistingFileAndIgnore() throws IOException { - Path dest = createTempDir(); - - FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dest, null); - assertFileContent(dest, "file1.txt", "version1"); - assertFileContent(dest, "dir/file2.txt", "version1"); - - FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dest, null); - assertFileContent(dest, "file1.txt", "version1"); - assertFileContent(dest, "dir/file2.txt", "version1"); - assertFileContent(dest, "file1.txt.new", null); - assertFileContent(dest, "dir/file2.txt.new", null); - assertFileContent(dest, "file3.txt", "version1"); - assertFileContent(dest, "dir/subdir/file4.txt", "version1"); - - FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dest, null); - assertFileContent(dest, "file1.txt", "version1"); - assertFileContent(dest, "dir/file2.txt", "version1"); - assertFileContent(dest, "file1.txt.new", null); - assertFileContent(dest, "dir/file2.txt.new", null); - assertFileContent(dest, "file3.txt", "version1"); - assertFileContent(dest, "dir/subdir/file4.txt", "version1"); - assertFileContent(dest, "file3.txt.new", null); - assertFileContent(dest, "dir/subdir/file4.txt.new", null); - assertFileContent(dest, "dir/subdir/file5.txt", "version1"); - } - - public void testMoveFilesDoesNotCreateSameFileWithSuffix() throws Exception { - Path[] dirs = new Path[] { createTempDir(), createTempDir(), createTempDir()}; - for (Path dir : dirs) { - Files.write(dir.resolve("file1.txt"), "file1".getBytes(StandardCharsets.UTF_8)); - Files.createDirectory(dir.resolve("dir")); - Files.write(dir.resolve("dir").resolve("file2.txt"), "file2".getBytes(StandardCharsets.UTF_8)); - } - - FileSystemUtils.moveFilesWithoutOverwriting(dirs[0], dst, ".new"); - assertFileContent(dst, "file1.txt", "file1"); - assertFileContent(dst, "dir/file2.txt", "file2"); - - // do the same operation again, make sure, no .new files have been added - FileSystemUtils.moveFilesWithoutOverwriting(dirs[1], dst, ".new"); - assertFileContent(dst, "file1.txt", "file1"); - assertFileContent(dst, "dir/file2.txt", "file2"); - assertFileNotExists(dst.resolve("file1.txt.new")); - assertFileNotExists(dst.resolve("dir").resolve("file2.txt.new")); - - // change file content, make sure it gets updated - Files.write(dirs[2].resolve("dir").resolve("file2.txt"), "UPDATED".getBytes(StandardCharsets.UTF_8)); - FileSystemUtils.moveFilesWithoutOverwriting(dirs[2], dst, ".new"); - assertFileContent(dst, "file1.txt", "file1"); - assertFileContent(dst, "dir/file2.txt", "file2"); - assertFileContent(dst, "dir/file2.txt.new", "UPDATED"); } public void testAppend() { diff --git a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index bf55a330509..1735515bf3a 100644 --- a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -37,8 +37,8 @@ import java.util.Arrays; public class BigArraysTests extends ESSingleNodeTestCase { - public static BigArrays randombigArrays() { - final PageCacheRecycler recycler = randomBoolean() ? 
null : ESSingleNodeTestCase.getInstanceFromNode(PageCacheRecycler.class); + private BigArrays randombigArrays() { + final PageCacheRecycler recycler = randomBoolean() ? null : getInstanceFromNode(PageCacheRecycler.class); return new MockBigArrays(recycler, new NoneCircuitBreakerService()); } diff --git a/core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java b/core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java index a26a06a09a3..01c27a65ab8 100644 --- a/core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java @@ -25,6 +25,8 @@ import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.TestUtil; +import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESSingleNodeTestCase; import java.util.HashMap; @@ -38,13 +40,18 @@ public class BytesRefHashTests extends ESSingleNodeTestCase { BytesRefHash hash; + private BigArrays randombigArrays() { + final PageCacheRecycler recycler = randomBoolean() ? null : getInstanceFromNode(PageCacheRecycler.class); + return new MockBigArrays(recycler, new NoneCircuitBreakerService()); + } + private void newHash() { if (hash != null) { hash.close(); } // Test high load factors to make sure that collision resolution works fine final float maxLoadFactor = 0.6f + randomFloat() * 0.39f; - hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randombigArrays()); + hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, randombigArrays()); } @Override diff --git a/core/src/test/java/org/elasticsearch/common/util/LongHashTests.java b/core/src/test/java/org/elasticsearch/common/util/LongHashTests.java index f5ae388db77..9439044a7be 100644 --- a/core/src/test/java/org/elasticsearch/common/util/LongHashTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/LongHashTests.java @@ -22,6 +22,8 @@ package org.elasticsearch.common.util; import com.carrotsearch.hppc.LongLongHashMap; import com.carrotsearch.hppc.LongLongMap; import com.carrotsearch.hppc.cursors.LongLongCursor; +import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESSingleNodeTestCase; import java.util.HashMap; @@ -33,6 +35,11 @@ import java.util.Set; public class LongHashTests extends ESSingleNodeTestCase { LongHash hash; + private BigArrays randombigArrays() { + final PageCacheRecycler recycler = randomBoolean() ? 
null : getInstanceFromNode(PageCacheRecycler.class); + return new MockBigArrays(recycler, new NoneCircuitBreakerService()); + } + private void newHash() { if (hash != null) { hash.close(); @@ -40,7 +47,7 @@ public class LongHashTests extends ESSingleNodeTestCase { // Test high load factors to make sure that collision resolution works fine final float maxLoadFactor = 0.6f + randomFloat() * 0.39f; - hash = new LongHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randombigArrays()); + hash = new LongHash(randomIntBetween(0, 100), maxLoadFactor, randombigArrays()); } @Override diff --git a/core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java b/core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java index bf091828ca5..1775f86199a 100644 --- a/core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java @@ -20,12 +20,20 @@ package org.elasticsearch.common.util; import com.carrotsearch.hppc.LongObjectHashMap; +import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESSingleNodeTestCase; public class LongObjectHashMapTests extends ESSingleNodeTestCase { + + private BigArrays randombigArrays() { + final PageCacheRecycler recycler = randomBoolean() ? null : getInstanceFromNode(PageCacheRecycler.class); + return new MockBigArrays(recycler, new NoneCircuitBreakerService()); + } + public void testDuel() { final LongObjectHashMap map1 = new LongObjectHashMap<>(); - final LongObjectPagedHashMap map2 = new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, BigArraysTests.randombigArrays()); + final LongObjectPagedHashMap map2 = new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, randombigArrays()); final int maxKey = randomIntBetween(1, 10000); final int iters = scaledRandomIntBetween(10000, 100000); for (int i = 0; i < iters; ++i) { diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainerTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainerTests.java new file mode 100644 index 00000000000..83db2d4a7c6 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainerTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class SuspendableRefContainerTests extends ESTestCase { + + public void testBasicAcquire() throws InterruptedException { + SuspendableRefContainer refContainer = new SuspendableRefContainer(); + assertThat(refContainer.activeRefs(), equalTo(0)); + + Releasable lock1 = randomLockingMethod(refContainer); + assertThat(refContainer.activeRefs(), equalTo(1)); + Releasable lock2 = randomLockingMethod(refContainer); + assertThat(refContainer.activeRefs(), equalTo(2)); + lock1.close(); + assertThat(refContainer.activeRefs(), equalTo(1)); + lock1.close(); // check idempotence + assertThat(refContainer.activeRefs(), equalTo(1)); + lock2.close(); + assertThat(refContainer.activeRefs(), equalTo(0)); + } + + public void testAcquisitionBlockingBlocksNewAcquisitions() throws InterruptedException { + SuspendableRefContainer refContainer = new SuspendableRefContainer(); + assertThat(refContainer.activeRefs(), equalTo(0)); + + try (Releasable block = refContainer.blockAcquisition()) { + assertThat(refContainer.activeRefs(), equalTo(0)); + assertThat(refContainer.tryAcquire(), nullValue()); + assertThat(refContainer.activeRefs(), equalTo(0)); + } + try (Releasable lock = refContainer.tryAcquire()) { + assertThat(refContainer.activeRefs(), equalTo(1)); + } + + // same with blocking acquire + AtomicBoolean acquired = new AtomicBoolean(); + Thread t = new Thread(() -> { + try (Releasable lock = randomBoolean() ? 
refContainer.acquire() : refContainer.acquireUninterruptibly()) { + acquired.set(true); + assertThat(refContainer.activeRefs(), equalTo(1)); + } catch (InterruptedException e) { + fail("Interrupted"); + } + }); + try (Releasable block = refContainer.blockAcquisition()) { + assertThat(refContainer.activeRefs(), equalTo(0)); + t.start(); + // check that blocking acquire really blocks + assertThat(acquired.get(), equalTo(false)); + assertThat(refContainer.activeRefs(), equalTo(0)); + } + t.join(); + assertThat(acquired.get(), equalTo(true)); + assertThat(refContainer.activeRefs(), equalTo(0)); + } + + public void testAcquisitionBlockingWaitsOnExistingAcquisitions() throws InterruptedException { + SuspendableRefContainer refContainer = new SuspendableRefContainer(); + + AtomicBoolean acquired = new AtomicBoolean(); + Thread t = new Thread(() -> { + try (Releasable block = refContainer.blockAcquisition()) { + acquired.set(true); + assertThat(refContainer.activeRefs(), equalTo(0)); + } + }); + try (Releasable lock = randomLockingMethod(refContainer)) { + assertThat(refContainer.activeRefs(), equalTo(1)); + t.start(); + assertThat(acquired.get(), equalTo(false)); + assertThat(refContainer.activeRefs(), equalTo(1)); + } + t.join(); + assertThat(acquired.get(), equalTo(true)); + assertThat(refContainer.activeRefs(), equalTo(0)); + } + + private Releasable randomLockingMethod(SuspendableRefContainer refContainer) throws InterruptedException { + switch (randomInt(2)) { + case 0: return refContainer.tryAcquire(); + case 1: return refContainer.acquire(); + case 2: return refContainer.acquireUninterruptibly(); + } + throw new IllegalArgumentException("randomLockingMethod inconsistent"); + } +} diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index c282f3ef183..739e07df4a9 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -56,7 +56,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.store.IndicesStoreIntegrationIT; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -905,7 +904,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ShardRouting failedShard = randomFrom(clusterService().state().getRoutingNodes().node(nonMasterNodeId).shardsWithState(ShardRoutingState.STARTED)); ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonMasterNode); - String indexUUID = clusterService().state().metaData().index("test").getIndexUUID(); CountDownLatch latch = new CountDownLatch(1); AtomicBoolean success = new AtomicBoolean(); @@ -913,7 +911,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { NetworkPartition networkPartition = addRandomIsolation(isolatedNode); networkPartition.startDisrupting(); - service.shardFailed(failedShard, indexUUID, "simulated", new CorruptIndexException("simulated", (String) null), new ShardStateAction.Listener() { + service.shardFailed(failedShard, failedShard, "simulated", new CorruptIndexException("simulated", 
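
Note on the new SuspendableRefContainerTests above: the container hands out shared refs (tryAcquire, acquire, acquireUninterruptibly), releasing a ref twice is a no-op, and blockAcquisition waits for all active refs and stalls new acquisitions until it is released. A minimal model of those semantics on a read/write lock; this is an assumption-level sketch, not the Elasticsearch implementation:

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Refs are shared (read) acquisitions; blockAcquisition is an exclusive
    // (write) acquisition that drains active refs and stalls new ones.
    final class RefContainerSketch {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);

        AutoCloseable tryAcquire() {
            return lock.readLock().tryLock() ? once(lock.readLock()::unlock) : null;
        }

        AutoCloseable acquire() throws InterruptedException {
            lock.readLock().lockInterruptibly();
            return once(lock.readLock()::unlock);
        }

        AutoCloseable blockAcquisition() {
            lock.writeLock().lock();           // waits until activeRefs() == 0
            return once(lock.writeLock()::unlock);
        }

        int activeRefs() {
            return lock.getReadLockCount();
        }

        // Releasing twice must be a no-op ("check idempotence" in testBasicAcquire).
        private static AutoCloseable once(Runnable release) {
            AtomicBoolean released = new AtomicBoolean();
            return () -> { if (released.compareAndSet(false, true)) release.run(); };
        }
    }

In this model tryAcquire maps to readLock().tryLock(), so it returns null exactly while a blockAcquisition holder (the write lock) is active, matching testAcquisitionBlockingBlocksNewAcquisitions.
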
(String) null), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 6c564a97740..ee92945c4ff 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -80,20 +80,6 @@ import static org.hamcrest.Matchers.sameInstance; @ESIntegTestCase.SuppressLocalMode @TestLogging("_root:DEBUG") public class ZenDiscoveryIT extends ESIntegTestCase { - public void testChangeRejoinOnMasterOptionIsDynamic() throws Exception { - Settings nodeSettings = Settings.settingsBuilder() - .put("discovery.type", "zen") // <-- To override the local setting if set externally - .build(); - String nodeName = internalCluster().startNode(nodeSettings); - ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); - assertThat(zenDiscovery.isRejoinOnMasterGone(), is(true)); - - client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING.getKey(), false)) - .get(); - - assertThat(zenDiscovery.isRejoinOnMasterGone(), is(false)); - } public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception { Settings defaultSettings = Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortIT.java b/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortIT.java index f227a9a03b4..b6cf9d91894 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortIT.java +++ b/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.node.Node; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -41,7 +41,7 @@ public class HttpPublishPortIT extends ESIntegTestCase { return Settings.settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) .put(NetworkModule.HTTP_ENABLED.getKey(), true) - .put("http.publish_port", 9080) + .put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080) .build(); } diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java index 017eef345a7..6311e56834d 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; @@ -81,7 +82,7 @@ public class NettyHttpChannelTests extends ESTestCase { public void 
testCorsEnabledWithoutAllowOrigins() { // Set up a HTTP transport with only the CORS enabled setting Settings settings = Settings.builder() - .put(NettyHttpServerTransport.SETTING_CORS_ENABLED.getKey(), true) + .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) .build(); httpServerTransport = new NettyHttpServerTransport(settings, networkService, bigArrays, threadPool); HttpRequest httpRequest = new TestHttpRequest(); @@ -104,8 +105,8 @@ public class NettyHttpChannelTests extends ESTestCase { public void testCorsEnabledWithAllowOrigins() { // create a http transport with CORS enabled and allow origin configured Settings settings = Settings.builder() - .put(NettyHttpServerTransport.SETTING_CORS_ENABLED.getKey(), true) - .put(NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN, "remote-host") + .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) + .put(HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN.getKey(), "remote-host") .build(); httpServerTransport = new NettyHttpServerTransport(settings, networkService, bigArrays, threadPool); HttpRequest httpRequest = new TestHttpRequest(); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index ca0069e4eda..a77e75d1356 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -57,6 +57,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; @@ -68,6 +70,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.NodeServicesProvider; @@ -107,6 +110,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; @@ -124,6 +128,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; /** * Simple unit-test IndexShard related operations. 
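
Note on the HttpPublishPortIT and NettyHttpChannelTests hunks above: raw setting keys ("http.publish_port") and transport-local constants (NettyHttpServerTransport.SETTING_CORS_*) give way to the shared HttpTransportSettings constants, so each key string lives in exactly one place and tests cannot drift from production. A minimal sketch of the typed-setting pattern with a hypothetical Setting class; the real one also carries parsing, validation, and update scope:

    // One constant owns the key string and the default; callers ask it for both.
    final class Setting<T> {
        private final String key;
        private final T defaultValue;

        Setting(String key, T defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }

        String getKey() { return key; }
        T getDefault() { return defaultValue; }
    }

    final class HttpSettingsSketch {
        static final Setting<Integer> PUBLISH_PORT = new Setting<>("http.publish_port", -1);
        static final Setting<Boolean> CORS_ENABLED = new Setting<>("http.cors.enabled", false);
    }

    // usage: settingsBuilder.put(HttpSettingsSketch.PUBLISH_PORT.getKey(), 9080)
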
@@ -250,7 +255,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - routing = TestShardRouting.newShardRouting(shard.shardId.getIndexName(), shard.shardId.id(), routing.currentNodeId(), null, routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.allocationId(), shard.shardRouting.version() + 1); + routing = TestShardRouting.newShardRouting(shard.shardId.getIndex(), shard.shardId.id(), routing.currentNodeId(), null, routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.allocationId(), shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shard.deleteShardState(); @@ -315,36 +320,41 @@ public class IndexShardTests extends ESSingleNodeTestCase { } - public void testDeleteIndexDecreasesCounter() throws InterruptedException, ExecutionException, IOException { + public void testDeleteIndexPreventsNewOperations() throws InterruptedException, ExecutionException, IOException { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexServiceSafe("test"); IndexShard indexShard = indexService.getShardOrNull(0); client().admin().indices().prepareDelete("test").get(); - assertThat(indexShard.getOperationsCount(), equalTo(0)); + assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); try { - indexShard.incrementOperationCounter(); + indexShard.acquirePrimaryOperationLock(); + fail("we should not be able to increment anymore"); + } catch (IndexShardClosedException e) { + // expected + } + try { + indexShard.acquireReplicaOperationLock(); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected } } - public void testIndexShardCounter() throws InterruptedException, ExecutionException, IOException { + public void testIndexOperationsCounter() throws InterruptedException, ExecutionException, IOException { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexServiceSafe("test"); IndexShard indexShard = indexService.getShardOrNull(0); - assertEquals(0, indexShard.getOperationsCount()); - indexShard.incrementOperationCounter(); - assertEquals(1, indexShard.getOperationsCount()); - indexShard.incrementOperationCounter(); - assertEquals(2, indexShard.getOperationsCount()); - indexShard.decrementOperationCounter(); - indexShard.decrementOperationCounter(); - assertEquals(0, indexShard.getOperationsCount()); + assertEquals(0, indexShard.getActiveOperationsCount()); + Releasable operation1 = indexShard.acquirePrimaryOperationLock(); + assertEquals(1, indexShard.getActiveOperationsCount()); + Releasable operation2 = indexShard.acquirePrimaryOperationLock(); + assertEquals(2, indexShard.getActiveOperationsCount()); + Releasables.close(operation1, operation2); + assertEquals(0, indexShard.getActiveOperationsCount()); } public void testMarkAsInactiveTriggersSyncedFlush() throws Exception { @@ -776,6 +786,89 @@ public class IndexShardTests 
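
Note on the IndexShardTests hunks above and below: the manual incrementOperationCounter/decrementOperationCounter pair becomes acquirePrimaryOperationLock/acquireReplicaOperationLock returning a Releasable, so try-with-resources guarantees the release even on exceptions, and a closed shard refuses new locks. An illustrative sketch of that API shape, not the real IndexShard:

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicInteger;

    final class OperationLockSketch {
        interface Releasable extends AutoCloseable {
            @Override void close(); // no checked exception, fits try-with-resources
        }

        private final AtomicInteger activeOps = new AtomicInteger();
        private volatile boolean closed = false;

        Releasable acquireOperationLock() {
            if (closed) {
                // the diff throws IndexShardClosedException here
                throw new IllegalStateException("shard closed");
            }
            activeOps.incrementAndGet();
            AtomicBoolean released = new AtomicBoolean();
            return () -> {
                if (released.compareAndSet(false, true)) {
                    activeOps.decrementAndGet();
                }
            };
        }

        void close() {
            closed = true; // later acquisitions fail, as testDeleteIndexPreventsNewOperations expects
        }

        int getActiveOperationsCount() {
            return activeOps.get();
        }
    }

    // old: shard.incrementOperationCounter(); try { ... } finally { shard.decrementOperationCounter(); }
    // new: try (OperationLockSketch.Releasable op = shard.acquireOperationLock()) { ... }

The SyncedFlushSingleNodeTests hunk later in this diff shows the payoff concretely: a try/finally around decrementOperationCounter collapses into a single try (Releasable ...) block.
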
extends ESSingleNodeTestCase { assertEquals(total + 1, shard.flushStats().getTotal()); } + public void testLockingBeforeAndAfterRelocated() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").setSettings( + Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + ).get()); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService("test"); + final IndexShard shard = test.getShardOrNull(0); + CountDownLatch latch = new CountDownLatch(1); + Thread recoveryThread = new Thread(() -> { + latch.countDown(); + shard.relocated("simulated recovery"); + }); + + try (Releasable ignored = shard.acquirePrimaryOperationLock()) { + // start finalization of recovery + recoveryThread.start(); + latch.await(); + // recovery can only be finalized after we release the current primaryOperationLock + assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + } + // recovery can be now finalized + recoveryThread.join(); + assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + try (Releasable ignored = shard.acquirePrimaryOperationLock()) { + // lock can again be acquired + assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + } + } + + public void testStressRelocated() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").setSettings( + Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + ).get()); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService("test"); + final IndexShard shard = test.getShardOrNull(0); + final int numThreads = randomIntBetween(2, 4); + Thread[] indexThreads = new Thread[numThreads]; + CountDownLatch allPrimaryOperationLocksAcquired = new CountDownLatch(numThreads); + CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); + for (int i = 0; i < indexThreads.length; i++) { + indexThreads[i] = new Thread() { + @Override + public void run() { + try (Releasable operationLock = shard.acquirePrimaryOperationLock()) { + allPrimaryOperationLocksAcquired.countDown(); + barrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new RuntimeException(e); + } + } + }; + indexThreads[i].start(); + } + AtomicBoolean relocated = new AtomicBoolean(); + final Thread recoveryThread = new Thread(() -> { + shard.relocated("simulated recovery"); + relocated.set(true); + }); + // ensure we wait for all primary operation locks to be acquired + allPrimaryOperationLocksAcquired.await(); + // start recovery thread + recoveryThread.start(); + assertThat(relocated.get(), equalTo(false)); + assertThat(shard.getActiveOperationsCount(), greaterThan(0)); + // ensure we only transition to RELOCATED state after pending operations completed + assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + // complete pending operations + barrier.await(); + // complete recovery/relocation + recoveryThread.join(); + // ensure relocated successfully once pending operations are done + assertThat(relocated.get(), equalTo(true)); + assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertThat(shard.getActiveOperationsCount(), equalTo(0)); + + for (Thread indexThread : indexThreads) { + indexThread.join(); + } + } + public void testRecoverFromStore() throws IOException { createIndex("test"); ensureGreen(); @@ -856,6 +949,27 @@ public class IndexShardTests extends 
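
Note on testLockingBeforeAndAfterRelocated and testStressRelocated above: relocated(...) must wait for every in-flight operation lock, flip the shard to RELOCATED while nothing runs, and then admit operation locks again (the following testRecoveryFailsAfterMovingToRelocatedState adds that routing updates after that point fail with IndexShardRelocatedException). One classic way to express the drain is a Semaphore stripped of all its permits; a self-contained model under that assumption, not the real IndexShard machinery:

    import java.util.concurrent.Semaphore;

    final class RelocationDrainSketch {
        private static final int MAX_OPS = 1 << 20;
        private final Semaphore ops = new Semaphore(MAX_OPS, true); // fair, so the drain is not starved
        private volatile boolean relocated = false;

        AutoCloseable acquireOperationLock() {
            ops.acquireUninterruptibly(1);
            return () -> ops.release(1);
        }

        void relocated() {
            ops.acquireUninterruptibly(MAX_OPS); // blocks until all active locks are released
            try {
                relocated = true;                // STARTED -> RELOCATED with no operation running
            } finally {
                ops.release(MAX_OPS);            // locks may be acquired again afterwards
            }
        }

        boolean isRelocated() { return relocated; }

        // Approximate while a drain is in progress.
        int activeOps() { return MAX_OPS - ops.availablePermits(); }
    }
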
ESSingleNodeTestCase { assertHitCount(client().prepareSearch().get(), 1); } + public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedException { + createIndex("test"); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService("test"); + final IndexShard shard = test.getShardOrNull(0); + ShardRouting origRouting = shard.routingEntry(); + assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + ShardRouting inRecoveryRouting = new ShardRouting(origRouting); + ShardRoutingHelper.relocate(inRecoveryRouting, "some_node"); + shard.updateRoutingEntry(inRecoveryRouting, true); + shard.relocated("simulate mark as relocated"); + assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + ShardRouting failedRecoveryRouting = new ShardRouting(origRouting); + try { + shard.updateRoutingEntry(failedRecoveryRouting, true); + fail("Expected IndexShardRelocatedException"); + } catch (IndexShardRelocatedException expected) { + } + } + public void testRestoreShard() throws IOException { createIndex("test"); createIndex("test_target"); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index f1f8a8222cb..b074729cdff 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -58,6 +58,7 @@ import static org.elasticsearch.index.shard.IndexShardState.CLOSED; import static org.elasticsearch.index.shard.IndexShardState.CREATED; import static org.elasticsearch.index.shard.IndexShardState.POST_RECOVERY; import static org.elasticsearch.index.shard.IndexShardState.RECOVERING; +import static org.elasticsearch.index.shard.IndexShardState.RELOCATED; import static org.elasticsearch.index.shard.IndexShardState.STARTED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.CoreMatchers.equalTo; @@ -181,7 +182,7 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase { ensureGreen(); //the 3 relocated shards get closed on the first node - assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED); + assertShardStatesMatch(stateChangeListenerNode1, 3, RELOCATED, CLOSED); //the 3 relocated shards get created on the second node assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index c30a5adaaca..239cb7a9096 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; @@ -110,8 +111,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); final ShardId shardId = 
shard.shardId(); - shard.incrementOperationCounter(); - try { + try (Releasable operationLock = shard.acquirePrimaryOperationLock()) { SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); flushService.attemptSyncedFlush(shardId, listener); listener.latch.await(); @@ -121,8 +121,6 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { assertEquals(0, syncedFlushResult.successfulShards()); assertNotEquals(0, syncedFlushResult.totalShards()); assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason()); - } finally { - shard.decrementOperationCounter(); } } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java new file mode 100644 index 00000000000..727641eb224 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.equalTo; + +@TestLogging("_root:DEBUG") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class IndexPrimaryRelocationIT extends ESIntegTestCase { + + private static final int RELOCATION_COUNT = 25; + + public void testPrimaryRelocationWhileIndexing() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); + client().admin().indices().prepareCreate("test") + .setSettings(Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) + .addMapping("type", "field", "type=string") + .get(); + ensureGreen("test"); + + final AtomicBoolean finished = new AtomicBoolean(false); + Thread indexingThread = new Thread() { + @Override + public void run() { + while (finished.get() == false) { + IndexResponse indexResponse = client().prepareIndex("test", "type", "id").setSource("field", "value").get(); + assertThat("deleted document was found", indexResponse.isCreated(), equalTo(true)); + DeleteResponse deleteResponse = client().prepareDelete("test", "type", "id").get(); + assertThat("indexed document was not found", deleteResponse.isFound(), equalTo(true)); + } + } + }; + indexingThread.start(); + + ClusterState initialState = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode[] dataNodes = initialState.getNodes().dataNodes().values().toArray(DiscoveryNode.class); + DiscoveryNode relocationSource = initialState.getNodes().dataNodes().get(initialState.getRoutingTable().shardRoutingTable("test", 0).primaryShard().currentNodeId()); + for (int i = 0; i < RELOCATION_COUNT; i++) { + DiscoveryNode relocationTarget = randomFrom(dataNodes); + while (relocationTarget.equals(relocationSource)) { + relocationTarget = randomFrom(dataNodes); + } + logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName()); + client().admin().cluster().prepareReroute() + .add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId())) + .execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).execute().actionGet(); + assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + logger.info("--> [iteration {}] relocation complete", i); + relocationSource = relocationTarget; + if (indexingThread.isAlive() == false) { // indexing process aborted early, no need for more relocations as test has already failed + break; + } + + } + finished.set(true); + indexingThread.join(); + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 12acea4f9ac..cc11cb82057 100644 --- 
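
Note on the new IndexPrimaryRelocationIT above: a writer thread alternates create and delete of a single document id while the test bounces the primary between data nodes RELOCATION_COUNT times, waiting for relocations to settle after each move. With one writer, every create must report created and every delete must report found; any violation means an acknowledged write was lost or resurrected across a relocation. The invariant in isolation, with a ConcurrentHashMap standing in for the index:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class CreateDeleteInvariant {
        public static void main(String[] args) throws InterruptedException {
            ConcurrentHashMap<String, String> index = new ConcurrentHashMap<>();
            AtomicBoolean finished = new AtomicBoolean(false);
            Thread writer = new Thread(() -> {
                while (finished.get() == false) {
                    boolean created = index.putIfAbsent("id", "value") == null;
                    if (created == false) throw new AssertionError("deleted document was found");
                    boolean found = index.remove("id") != null;
                    if (found == false) throw new AssertionError("indexed document was not found");
                }
            });
            writer.start();
            Thread.sleep(100);   // the real test relocates the primary here, 25 times
            finished.set(true);
            writer.join();
        }
    }
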
a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -286,7 +286,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.STORE, Stage.DONE, nodeA, nodeA, false); validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex()); - assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, nodeA, nodeB, false); + assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, nodeA, nodeB, false); validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex()); logger.info("--> request node recovery stats"); @@ -339,7 +339,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(1)); - assertRecoveryState(recoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); + assertRecoveryState(recoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false); validateIndexRecoveryState(recoveryStates.get(0).getIndex()); statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get(); @@ -400,7 +400,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeA, false); validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex()); - assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); + assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false); validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex()); // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B) @@ -421,7 +421,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates); assertThat(nodeCRecoveryStates.size(), equalTo(1)); - assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); + assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false); validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex()); // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B) @@ -503,7 +503,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(INDEX_NAME, INDEX_TYPE). + docs[i] = client().prepareIndex(name, INDEX_TYPE). 
setSource("foo-int", randomInt(), "foo-string", randomAsciiOfLength(32), "foo-float", randomFloat()); @@ -511,8 +511,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { indexRandom(true, docs); flush(); - assertThat(client().prepareSearch(INDEX_NAME).setSize(0).get().getHits().totalHits(), equalTo((long) numDocs)); - return client().admin().indices().prepareStats(INDEX_NAME).execute().actionGet(); + assertThat(client().prepareSearch(name).setSize(0).get().getHits().totalHits(), equalTo((long) numDocs)); + return client().admin().indices().prepareStats(name).execute().actionGet(); } private void validateIndexRecoveryState(RecoveryState.Index indexState) { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index c8cad5be296..b29404d59b6 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -69,7 +69,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { StartRecoveryRequest request = new StartRecoveryRequest(shardId, new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT), new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT), - randomBoolean(), null, RecoveryState.Type.STORE, randomLong()); + null, RecoveryState.Type.STORE, randomLong()); Store store = newStore(createTempDir()); RecoverySourceHandler handler = new RecoverySourceHandler(null, request, recoverySettings, null, logger); Directory dir = store.directory(); @@ -118,7 +118,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { StartRecoveryRequest request = new StartRecoveryRequest(shardId, new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT), new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT), - randomBoolean(), null, RecoveryState.Type.STORE, randomLong()); + null, RecoveryState.Type.STORE, randomLong()); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); @@ -181,7 +181,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { StartRecoveryRequest request = new StartRecoveryRequest(shardId, new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT), new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT), - randomBoolean(), null, RecoveryState.Type.STORE, randomLong()); + null, RecoveryState.Type.STORE, randomLong()); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index c7a7852e426..3406388bd5b 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -43,11 +43,9 @@ public class StartRecoveryRequestTests extends ESTestCase { new ShardId("test", "_na_", 0), new DiscoveryNode("a", new LocalTransportAddress("1"), targetNodeVersion), new DiscoveryNode("b", new LocalTransportAddress("1"), targetNodeVersion), - true, Store.MetadataSnapshot.EMPTY, - RecoveryState.Type.RELOCATION, + RecoveryState.Type.PRIMARY_RELOCATION, 1L - ); ByteArrayOutputStream outBuffer = 
new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); @@ -63,7 +61,6 @@ public class StartRecoveryRequestTests extends ESTestCase { assertThat(outRequest.shardId(), equalTo(inRequest.shardId())); assertThat(outRequest.sourceNode(), equalTo(inRequest.sourceNode())); assertThat(outRequest.targetNode(), equalTo(inRequest.targetNode())); - assertThat(outRequest.markAsRelocated(), equalTo(inRequest.markAsRelocated())); assertThat(outRequest.metadataSnapshot().asMap(), equalTo(inRequest.metadataSnapshot().asMap())); assertThat(outRequest.recoveryId(), equalTo(inRequest.recoveryId())); assertThat(outRequest.recoveryType(), equalTo(inRequest.recoveryType())); diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java b/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java index bcbe41dd66f..e5fcba2a3be 100644 --- a/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java @@ -19,6 +19,8 @@ package org.elasticsearch.ingest; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -38,11 +40,13 @@ import org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.node.NodeModule; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.RemoteTransportException; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -201,23 +205,7 @@ public class IngestClientIT extends ESIntegTestCase { assertThat(getResponse.pipelines().size(), equalTo(0)); } - public void testPutWithPipelineError() throws Exception { - BytesReference source = jsonBuilder().startObject() - .field("description", "my_pipeline") - .startArray("processors") - .startObject() - .startObject("not_found") - .endObject() - .endObject() - .endArray() - .endObject().bytes(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source); - WritePipelineResponse response = client().admin().cluster().putPipeline(putPipelineRequest).get(); - assertThat(response.isAcknowledged(), equalTo(false)); - assertThat(response.getError().getReason(), equalTo("No processor type exists with name [not_found]")); - } - - public void testPutWithProcessorFactoryError() throws Exception { + public void testPutWithPipelineFactoryError() throws Exception { BytesReference source = jsonBuilder().startObject() .field("description", "my_pipeline") .startArray("processors") @@ -229,9 +217,13 @@ public class IngestClientIT extends ESIntegTestCase { .endArray() .endObject().bytes(); PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source); - WritePipelineResponse response = client().admin().cluster().putPipeline(putPipelineRequest).get(); - assertThat(response.isAcknowledged(), equalTo(false)); - assertThat(response.getError().getReason(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); + try { + client().admin().cluster().putPipeline(putPipelineRequest).get(); + } catch (ExecutionException e) { + ElasticsearchParseException ex = 
(ElasticsearchParseException) ExceptionsHelper.unwrap(e, ElasticsearchParseException.class); + assertNotNull(ex); + assertThat(ex.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); + } } @Override diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java index a75a84f0379..bdf1f7d49c1 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java @@ -19,10 +19,10 @@ package org.elasticsearch.ingest; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; @@ -103,42 +103,21 @@ public class PipelineStoreTests extends ESTestCase { } public void testPutWithErrorResponse() { + String id = "_id"; + Pipeline pipeline = store.get(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - } - - public void testConstructPipelineResponseSuccess() { - Map processorConfig = new HashMap<>(); - processorConfig.put("field", "foo"); - processorConfig.put("value", "bar"); - Map pipelineConfig = new HashMap<>(); - pipelineConfig.put("description", "_description"); - pipelineConfig.put("processors", Collections.singletonList(Collections.singletonMap("set", processorConfig))); - WritePipelineResponse response = store.validatePipelineResponse("test_id", pipelineConfig); - assertThat(response, nullValue()); - } - - public void testConstructPipelineResponseMissingProcessorsFieldException() { - Map pipelineConfig = new HashMap<>(); - pipelineConfig.put("description", "_description"); - WritePipelineResponse response = store.validatePipelineResponse("test_id", pipelineConfig); - assertThat(response.getError().getProcessorType(), is(nullValue())); - assertThat(response.getError().getProcessorTag(), is(nullValue())); - assertThat(response.getError().getProcessorPropertyName(), equalTo("processors")); - assertThat(response.getError().getReason(), equalTo("[processors] required property is missing")); - } - - public void testConstructPipelineResponseConfigurationException() { - Map processorConfig = new HashMap<>(); - processorConfig.put("field", "foo"); - Map pipelineConfig = new HashMap<>(); - pipelineConfig.put("description", "_description"); - pipelineConfig.put("processors", Collections.singletonList(Collections.singletonMap("set", processorConfig))); - WritePipelineResponse response = store.validatePipelineResponse("test_id", pipelineConfig); - - assertThat(response.getError().getProcessorTag(), nullValue()); - assertThat(response.getError().getProcessorType(), equalTo("set")); - assertThat(response.getError().getProcessorPropertyName(), equalTo("value")); - assertThat(response.getError().getReason(), equalTo("[value] required property is missing")); + PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}")); + clusterState = store.innerPut(putRequest, clusterState); + try { + store.innerUpdatePipelines(clusterState); + fail("should fail"); + } catch 
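
Note on the IngestClientIT rewrite above: pipeline-validation failures are no longer reported through a WritePipelineResponse error payload but thrown, so the client sees them as an ExecutionException from Future.get() and digs the typed cause out with ExceptionsHelper.unwrap. (One soft spot: if putPipeline unexpectedly succeeds, the try block falls through without a fail(...) call, so this negative test would pass vacuously.) A generic sketch of the cause-chain walk; a production helper would also guard against pathological cause cycles, as the bounded loop here does:

    final class ExceptionsSketch {
        // Walk the cause chain for the first throwable of the requested type.
        static <T extends Throwable> T unwrap(Throwable t, Class<T> type) {
            int guard = 0;
            for (Throwable cur = t; cur != null && guard++ < 10; cur = cur.getCause()) {
                if (type.isInstance(cur)) {
                    return type.cast(cur);
                }
            }
            return null; // not found within a reasonable depth
        }
    }
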
(ElasticsearchParseException e) { + assertThat(e.getMessage(), equalTo("[processors] required property is missing")); + } + pipeline = store.get(id); + assertThat(pipeline, nullValue()); } public void testDelete() { diff --git a/core/src/test/java/org/elasticsearch/ingest/core/CompoundProcessorTests.java b/core/src/test/java/org/elasticsearch/ingest/core/CompoundProcessorTests.java index f21644e6005..7bc8922af41 100644 --- a/core/src/test/java/org/elasticsearch/ingest/core/CompoundProcessorTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/core/CompoundProcessorTests.java @@ -80,9 +80,10 @@ public class CompoundProcessorTests extends ESTestCase { TestProcessor processor1 = new TestProcessor("id", "first", ingestDocument -> {throw new RuntimeException("error");}); TestProcessor processor2 = new TestProcessor(ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); - assertThat(ingestMetadata.size(), equalTo(2)); + assertThat(ingestMetadata.size(), equalTo(3)); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo("error")); - assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_FIELD), equalTo("first")); + assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("first")); + assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("id")); }); CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.singletonList(processor1), Collections.singletonList(processor2)); @@ -94,18 +95,20 @@ public class CompoundProcessorTests extends ESTestCase { public void testSingleProcessorWithNestedFailures() throws Exception { TestProcessor processor = new TestProcessor("id", "first", ingestDocument -> {throw new RuntimeException("error");}); - TestProcessor processorToFail = new TestProcessor("id", "second", ingestDocument -> { + TestProcessor processorToFail = new TestProcessor("id2", "second", ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); - assertThat(ingestMetadata.size(), equalTo(2)); + assertThat(ingestMetadata.size(), equalTo(3)); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo("error")); - assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_FIELD), equalTo("first")); + assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("first")); + assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("id")); throw new RuntimeException("error"); }); TestProcessor lastProcessor = new TestProcessor(ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); - assertThat(ingestMetadata.size(), equalTo(2)); + assertThat(ingestMetadata.size(), equalTo(3)); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo("error")); - assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_FIELD), equalTo("second")); + assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("second")); + assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("id2")); }); CompoundProcessor compoundOnFailProcessor = new CompoundProcessor(Collections.singletonList(processorToFail), Collections.singletonList(lastProcessor)); CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.singletonList(processor), Collections.singletonList(compoundOnFailProcessor)); diff --git 
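
Note on the PipelineStoreTests and CompoundProcessorTests hunks above: pipeline validation now fails by throwing ElasticsearchParseException from innerUpdatePipelines, leaving the store without the bad pipeline, and the ingest metadata visible to on_failure processors grows from two entries to three, splitting the failed processor's type and tag. A sketch of that metadata contract; the literal key strings are assumptions mirroring the ON_FAILURE_* constant names in the diff:

    import java.util.HashMap;
    import java.util.Map;

    // Populate the failure metadata an on_failure processor can inspect.
    final class OnFailureMetadataSketch {
        static Map<String, Object> describe(Exception e, String processorType, String processorTag) {
            Map<String, Object> ingestMetadata = new HashMap<>();
            ingestMetadata.put("on_failure_message", e.getMessage());        // assumed key
            ingestMetadata.put("on_failure_processor_type", processorType);  // assumed key
            ingestMetadata.put("on_failure_processor_tag", processorTag);    // assumed key
            return ingestMetadata;
        }
    }
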
a/core/src/test/java/org/elasticsearch/ingest/core/ConfigurationUtilsTests.java b/core/src/test/java/org/elasticsearch/ingest/core/ConfigurationUtilsTests.java index 954a03c2172..722f14e396e 100644 --- a/core/src/test/java/org/elasticsearch/ingest/core/ConfigurationUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/core/ConfigurationUtilsTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest.core; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -58,7 +58,7 @@ public class ConfigurationUtilsTests extends ESTestCase { public void testReadStringPropertyInvalidType() { try { ConfigurationUtils.readStringProperty(null, null, config, "arr"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[arr] property isn't a string, but of type [java.util.Arrays$ArrayList]")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java index 746ac2f5617..04f887e9383 100644 --- a/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.ingest.core; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.TestProcessor; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -59,7 +59,7 @@ public class PipelineFactoryTests extends ESTestCase { try { factory.create("_id", pipelineConfig, Collections.emptyMap()); fail("should fail, missing required [processors] field"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[processors] required property is missing")); } } @@ -91,7 +91,7 @@ public class PipelineFactoryTests extends ESTestCase { Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); try { factory.create("_id", pipelineConfig, processorRegistry); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/AppendProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/AppendProcessorFactoryTests.java index c4c13a6ab7d..7350e3d9c43 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/AppendProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/AppendProcessorFactoryTests.java @@ -19,9 +19,9 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.ingest.core.AbstractProcessorFactory; -import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -65,7 +65,7 @@ public class AppendProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), 
equalTo("[field] required property is missing")); } } @@ -76,7 +76,7 @@ public class AppendProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); } } @@ -88,7 +88,7 @@ public class AppendProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java index a07cec5c4e7..831e87436ba 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -28,6 +29,7 @@ import java.util.HashMap; import java.util.Map; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class ConvertProcessorFactoryTests extends ESTestCase { @@ -54,8 +56,11 @@ public class ConvertProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), Matchers.equalTo("type [" + type + "] not supported, cannot convert field.")); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), Matchers.equalTo("[type] type [" + type + "] not supported, cannot convert field.")); + assertThat(e.getHeader("processor_type").get(0), equalTo(ConvertProcessor.TYPE)); + assertThat(e.getHeader("property_name").get(0), equalTo("type")); + assertThat(e.getHeader("processor_tag"), nullValue()); } } @@ -67,7 +72,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing")); } } @@ -79,7 +84,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), Matchers.equalTo("[type] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/DateProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/DateProcessorFactoryTests.java index 1139f1968f7..7ea1de17fc0 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/DateProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/DateProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import 
org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -64,7 +65,7 @@ public class DateProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("processor creation should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("[match_field] required property is missing")); } } @@ -80,7 +81,7 @@ public class DateProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("processor creation should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("[match_formats] required property is missing")); } } @@ -170,7 +171,7 @@ public class DateProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("processor creation should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("[match_formats] property isn't a list, but of type [java.lang.String]")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/DeDotProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/DeDotProcessorFactoryTests.java deleted file mode 100644 index 63eee56cc68..00000000000 --- a/core/src/test/java/org/elasticsearch/ingest/processor/DeDotProcessorFactoryTests.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest.processor; - -import org.elasticsearch.ingest.core.AbstractProcessorFactory; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class DeDotProcessorFactoryTests extends ESTestCase { - - private DeDotProcessor.Factory factory; - - @Before - public void init() { - factory = new DeDotProcessor.Factory(); - } - - public void testCreate() throws Exception { - Map config = new HashMap<>(); - config.put("separator", "_"); - String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - DeDotProcessor deDotProcessor = factory.create(config); - assertThat(deDotProcessor.getSeparator(), equalTo("_")); - assertThat(deDotProcessor.getTag(), equalTo(processorTag)); - } - - public void testCreateMissingSeparatorField() throws Exception { - Map config = new HashMap<>(); - DeDotProcessor deDotProcessor = factory.create(config); - assertThat(deDotProcessor.getSeparator(), equalTo(DeDotProcessor.DEFAULT_SEPARATOR)); - } - -} diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/DeDotProcessorTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/DeDotProcessorTests.java deleted file mode 100644 index a0c87d7a16b..00000000000 --- a/core/src/test/java/org/elasticsearch/ingest/processor/DeDotProcessorTests.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest.processor; - -import org.elasticsearch.ingest.core.IngestDocument; -import org.elasticsearch.ingest.core.Processor; -import org.elasticsearch.test.ESTestCase; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; - -public class DeDotProcessorTests extends ESTestCase { - - public void testSimple() throws Exception { - Map source = new HashMap<>(); - source.put("a.b", "hello world!"); - IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap()); - String separator = randomUnicodeOfCodepointLengthBetween(1, 10); - Processor processor = new DeDotProcessor(randomAsciiOfLength(10), separator); - processor.execute(ingestDocument); - assertThat(ingestDocument.getSourceAndMetadata().get("a" + separator + "b" ), equalTo("hello world!")); - } - - public void testSimpleMap() throws Exception { - Map source = new HashMap<>(); - Map subField = new HashMap<>(); - subField.put("b.c", "hello world!"); - source.put("a", subField); - IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap()); - Processor processor = new DeDotProcessor(randomAsciiOfLength(10), "_"); - processor.execute(ingestDocument); - - IngestDocument expectedDocument = new IngestDocument( - Collections.singletonMap("a", Collections.singletonMap("b_c", "hello world!")), - Collections.emptyMap()); - assertThat(ingestDocument, equalTo(expectedDocument)); - } - - public void testSimpleList() throws Exception { - Map source = new HashMap<>(); - Map subField = new HashMap<>(); - subField.put("b.c", "hello world!"); - source.put("a", Arrays.asList(subField)); - IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap()); - Processor processor = new DeDotProcessor(randomAsciiOfLength(10), "_"); - processor.execute(ingestDocument); - - IngestDocument expectedDocument = new IngestDocument( - Collections.singletonMap("a", - Collections.singletonList(Collections.singletonMap("b_c", "hello world!"))), - Collections.emptyMap()); - assertThat(ingestDocument, equalTo(expectedDocument)); - } -} diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/FailProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/FailProcessorFactoryTests.java index 661a6383dfd..0d88710c80d 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/FailProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/FailProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; @@ -55,7 +56,7 @@ public class FailProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[message] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java index bce033091ac..2440ff68408 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java +++ 
b/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -53,7 +54,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } @@ -66,7 +67,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[pattern] required property is missing")); } } @@ -79,7 +80,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[replacement] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/JoinProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/JoinProcessorFactoryTests.java index 51eb989beda..c374b8a3318 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/JoinProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/JoinProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -50,7 +51,7 @@ public class JoinProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } @@ -62,7 +63,7 @@ public class JoinProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[separator] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/LowercaseProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/LowercaseProcessorFactoryTests.java index 32eefa07896..09d676b3b30 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/LowercaseProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/LowercaseProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -47,7 +48,7 @@ public class LowercaseProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } 
catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/RemoveProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/RemoveProcessorFactoryTests.java index 5b03d288064..1b9d88160bd 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/RemoveProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/RemoveProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; @@ -55,7 +56,7 @@ public class RemoveProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/RenameProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/RenameProcessorFactoryTests.java index ea6284f305a..85fc3e71bba 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/RenameProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/RenameProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -50,7 +51,7 @@ public class RenameProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } @@ -62,7 +63,7 @@ public class RenameProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[to] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/SetProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/SetProcessorFactoryTests.java index 1c3cf15e48f..2db2dcd5e1c 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/SetProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/SetProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; @@ -58,7 +59,7 @@ public class SetProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } @@ -69,7 +70,7 @@ public class SetProcessorFactoryTests extends ESTestCase { try { 
factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); } } @@ -81,7 +82,7 @@ public class SetProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/SplitProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/SplitProcessorFactoryTests.java index 3bd2f95e2bc..70fca6f501b 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/SplitProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/SplitProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -50,7 +51,7 @@ public class SplitProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } @@ -62,7 +63,7 @@ public class SplitProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[separator] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/TrimProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/TrimProcessorFactoryTests.java index 8012893bfcb..1e74b78f973 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/TrimProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/TrimProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; @@ -47,7 +48,7 @@ public class TrimProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/UppercaseProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/UppercaseProcessorFactoryTests.java index 914909f9378..40e14b5f14d 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/UppercaseProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/UppercaseProcessorFactoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.processor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.Processor; import 
org.elasticsearch.test.ESTestCase; @@ -47,7 +48,7 @@ public class UppercaseProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("factory create should have failed"); - } catch(ConfigurationPropertyException e) { + } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java index 29f497458c7..2c248969b2c 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java @@ -62,9 +62,9 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { public void testMissingSetting() throws InterruptedException { String collector = randomAsciiOfLength(5); Set<AbstractMap.SimpleEntry<String, String>> entries = new HashSet<>(); - entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".warn", randomTimeValue())); - entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".info", randomTimeValue())); - entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".debug", randomTimeValue())); + entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".warn", randomPositiveTimeValue())); + entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".info", randomPositiveTimeValue())); + entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".debug", randomPositiveTimeValue())); Settings.Builder builder = Settings.builder(); // drop a random setting or two diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java index 2a121be509c..d1f25a8fb4f 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java @@ -23,8 +23,7 @@ import org.apache.http.impl.client.HttpClients; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.http.netty.NettyHttpServerTransport; -import org.elasticsearch.node.Node; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -45,7 +44,7 @@ public class DetailedErrorsDisabledIT extends ESIntegTestCase { return Settings.settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) .put(NetworkModule.HTTP_ENABLED.getKey(), true) - .put(NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED.getKey(), false) + .put(HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.getKey(), false) .build(); } diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginManagerCliTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java similarity index 58% rename from core/src/test/java/org/elasticsearch/plugins/PluginManagerCliTests.java rename to core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java index f16f9981d93..3a121590083 100644 ---
a/core/src/test/java/org/elasticsearch/plugins/PluginManagerCliTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java @@ -19,47 +19,32 @@ package org.elasticsearch.plugins; -import org.elasticsearch.common.cli.CliTool; import org.elasticsearch.common.cli.CliToolTestCase; -import java.io.IOException; -import java.net.MalformedURLException; -import java.nio.file.Path; - -import static org.elasticsearch.common.cli.CliTool.ExitStatus.IO_ERROR; import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; -public class PluginManagerCliTests extends CliToolTestCase { - public void testHelpWorks() throws IOException { +public class PluginCliTests extends CliToolTestCase { + public void testHelpWorks() throws Exception { CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(); - assertThat(new PluginManagerCliParser(terminal).execute(args("--help")), is(OK_AND_EXIT)); + assertThat(new PluginCli(terminal).execute(args("--help")), is(OK_AND_EXIT)); assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin.help"); terminal.getTerminalOutput().clear(); - assertThat(new PluginManagerCliParser(terminal).execute(args("install -h")), is(OK_AND_EXIT)); + assertThat(new PluginCli(terminal).execute(args("install -h")), is(OK_AND_EXIT)); assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-install.help"); - for (String plugin : PluginManager.OFFICIAL_PLUGINS) { + for (String plugin : InstallPluginCommand.OFFICIAL_PLUGINS) { assertThat(terminal.getTerminalOutput(), hasItem(containsString(plugin))); } terminal.getTerminalOutput().clear(); - assertThat(new PluginManagerCliParser(terminal).execute(args("remove --help")), is(OK_AND_EXIT)); + assertThat(new PluginCli(terminal).execute(args("remove --help")), is(OK_AND_EXIT)); assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-remove.help"); terminal.getTerminalOutput().clear(); - assertThat(new PluginManagerCliParser(terminal).execute(args("list -h")), is(OK_AND_EXIT)); + assertThat(new PluginCli(terminal).execute(args("list -h")), is(OK_AND_EXIT)); assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-list.help"); } - - public void testUrlSpacesInPath() throws MalformedURLException { - CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(); - Path tmpDir = createTempDir().resolve("foo deps"); - String finalDir = tmpDir.toAbsolutePath().toUri().toURL().toString(); - logger.warn(finalDir); - CliTool.ExitStatus execute = new PluginManagerCliParser(terminal).execute(args("install " + finalDir)); - assertThat(execute.status(), is(IO_ERROR.status())); - } } diff --git a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java index 61dca3f37af..8c6b71c9eac 100644 --- a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -151,7 +151,7 @@ public class FullRollingRestartIT extends ESIntegTestCase { ClusterState state = client().admin().cluster().prepareState().get().getState(); RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); for (RecoveryState recoveryState : 
recoveryResponse.shardRecoveryStates().get("test")) { - assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getType() != RecoveryState.Type.RELOCATION); + assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getType() != RecoveryState.Type.PRIMARY_RELOCATION); } internalCluster().restartRandomDataNode(); ensureGreen(); @@ -159,7 +159,7 @@ public class FullRollingRestartIT extends ESIntegTestCase { recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { - assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getType() != RecoveryState.Type.RELOCATION); + assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getType() != RecoveryState.Type.PRIMARY_RELOCATION); } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java index 1c624f98e2f..9740032ed7e 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java @@ -22,15 +22,14 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.rest.client.http.HttpResponse; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN; -import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -47,7 +46,7 @@ public class CorsRegexIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) - .put(SETTING_CORS_ALLOW_ORIGIN, "/https?:\\/\\/localhost(:[0-9]+)?/") + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "/https?:\\/\\/localhost(:[0-9]+)?/") .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) .put(SETTING_CORS_ENABLED.getKey(), true) .put(NetworkModule.HTTP_ENABLED.getKey(), true) diff --git a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index ea225d9680b..e36ac662342 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ 
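
The CorsRegexIT hunk above sets `SETTING_CORS_ALLOW_ORIGIN` to a value wrapped in forward slashes (`/https?:\/\/localhost(:[0-9]+)?/`). The matching code is not part of this diff, so the following is only a sketch of the apparent semantics, assuming slash-delimited values are compiled as regular expressions while anything else is compared literally.

[source,java]
----
import java.util.regex.Pattern;

final class CorsOriginSketch {
    // Hypothetical origin check: a leading and trailing '/' marks the
    // configured value as a regex (as the test's setting value suggests);
    // otherwise fall back to an exact string comparison.
    static boolean originAllowed(String configured, String origin) {
        if (configured.length() > 2 && configured.startsWith("/") && configured.endsWith("/")) {
            Pattern pattern = Pattern.compile(configured.substring(1, configured.length() - 1));
            return pattern.matcher(origin).matches();
        }
        return configured.equals(origin);
    }
}
----

Under that reading, `originAllowed("/https?:\\/\\/localhost(:[0-9]+)?/", "http://localhost:9200")` would return true, which is what the test's CORS requests rely on.
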
b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -115,7 +115,7 @@ public class SimpleThreadPoolIT extends ESIntegTestCase { for (String threadName : threadNames) { // ignore some shared threads we know that are created within the same VM, like the shared discovery one // or the ones that are occasionally come up from ESSingleNodeTestCase - if (threadName.contains("[" + ESSingleNodeTestCase.nodeName() + "]") + if (threadName.contains("[node_s_0]") // TODO: this can't possibly be right! single node and integ test are unrelated! || threadName.contains("Keep-Alive-Timer")) { continue; } diff --git a/core/src/test/resources/indices/bwc/index-2.1.2.zip b/core/src/test/resources/indices/bwc/index-2.1.2.zip new file mode 100644 index 00000000000..739c104a236 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.1.2.zip differ diff --git a/core/src/test/resources/indices/bwc/index-2.2.0.zip b/core/src/test/resources/indices/bwc/index-2.2.0.zip new file mode 100644 index 00000000000..f9011ffce07 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.2.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.1.2.zip b/core/src/test/resources/indices/bwc/repo-2.1.2.zip new file mode 100644 index 00000000000..a89507f0042 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.1.2.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.2.0.zip b/core/src/test/resources/indices/bwc/repo-2.2.0.zip new file mode 100644 index 00000000000..90f8282f46a Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.2.0.zip differ diff --git a/core/src/test/resources/indices/bwc/unsupported-1.7.4.zip b/core/src/test/resources/indices/bwc/unsupported-1.7.4.zip new file mode 100644 index 00000000000..a47ff4faffc Binary files /dev/null and b/core/src/test/resources/indices/bwc/unsupported-1.7.4.zip differ diff --git a/core/src/test/resources/indices/bwc/unsupported-1.7.5.zip b/core/src/test/resources/indices/bwc/unsupported-1.7.5.zip new file mode 100644 index 00000000000..22625293a1c Binary files /dev/null and b/core/src/test/resources/indices/bwc/unsupported-1.7.5.zip differ diff --git a/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.4.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.4.zip new file mode 100644 index 00000000000..86be302153b Binary files /dev/null and b/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.4.zip differ diff --git a/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.5.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.5.zip new file mode 100644 index 00000000000..46bada25ced Binary files /dev/null and b/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.5.zip differ diff --git a/core/src/test/resources/org/elasticsearch/plugins/loading/classpath/es-plugin-test.properties b/core/src/test/resources/org/elasticsearch/plugins/loading/classpath/es-plugin-test.properties deleted file mode 100644 index f57bea58cf2..00000000000 --- a/core/src/test/resources/org/elasticsearch/plugins/loading/classpath/es-plugin-test.properties +++ /dev/null @@ -1,19 +0,0 @@ -################################################################ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. 
Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -################################################################ -plugin=org.elasticsearch.plugins.loading.classpath.InClassPathPlugin \ No newline at end of file diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index 8c8a6fb9fae..f32a6b80f52 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -66,7 +66,6 @@ DEFAULT_PLUGINS = ["analysis-icu", "discovery-azure", "discovery-ec2", "discovery-gce", - "discovery-multicast", "lang-javascript", "lang-painless", "lang-python", diff --git a/distribution/src/main/resources/bin/plugin b/distribution/src/main/resources/bin/plugin index 95011870358..1bab4b1118c 100755 --- a/distribution/src/main/resources/bin/plugin +++ b/distribution/src/main/resources/bin/plugin @@ -110,4 +110,4 @@ fi HOSTNAME=`hostname | cut -d. -f1` export HOSTNAME -eval "$JAVA" -client -Delasticsearch -Des.path.home="\"$ES_HOME\"" $properties -cp "\"$ES_HOME/lib/*\"" org.elasticsearch.plugins.PluginManagerCliParser $args +eval "$JAVA" -client -Delasticsearch -Des.path.home="\"$ES_HOME\"" $properties -cp "\"$ES_HOME/lib/*\"" org.elasticsearch.plugins.PluginCli $args diff --git a/distribution/src/main/resources/bin/plugin.bat b/distribution/src/main/resources/bin/plugin.bat index c41b0156636..6c6be019fc6 100644 Binary files a/distribution/src/main/resources/bin/plugin.bat and b/distribution/src/main/resources/bin/plugin.bat differ diff --git a/docs/plugins/discovery-multicast.asciidoc b/docs/plugins/discovery-multicast.asciidoc deleted file mode 100644 index 75acbd89577..00000000000 --- a/docs/plugins/discovery-multicast.asciidoc +++ /dev/null @@ -1,55 +0,0 @@ -[[discovery-multicast]] -=== Multicast Discovery Plugin - -The Multicast Discovery plugin provides the ability to form a cluster using -TCP/IP multicast messages. - -[[discovery-multicast-install]] -[float] -==== Installation - -This plugin can be installed using the plugin manager: - -[source,sh] ----------------------------------------------------------------- -sudo bin/plugin install discovery-multicast ----------------------------------------------------------------- - -The plugin must be installed on every node in the cluster, and each node must -be restarted after installation. - -[[discovery-multicast-remove]] -[float] -==== Removal - -The plugin can be removed with the following command: - -[source,sh] ----------------------------------------------------------------- -sudo bin/plugin remove discovery-multicast ----------------------------------------------------------------- - -The node must be stopped before removing the plugin. - -[[discovery-multicast-usage]] -==== Configuring multicast discovery - -Multicast ping discovery of other nodes is done by sending one or more -multicast requests which existing nodes will receive and -respond to. 
It provides the following settings with the -`discovery.zen.ping.multicast` prefix: - -[cols="<,<",options="header",] -|======================================================================= -|Setting |Description -|`group` |The group address to use. Defaults to `224.2.2.4`. - -|`port` |The port to use. Defaults to `54328`. - -|`ttl` |The ttl of the multicast message. Defaults to `3`. - -|`address` |The address to bind to, defaults to `null` which means it -will bind `network.bind_host` - -|`enabled` |Whether multicast ping discovery is enabled. Defaults to `false`. -|======================================================================= diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index cfc98e45dee..62c5b4551ac 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -21,10 +21,6 @@ The Azure discovery plugin uses the Azure API for unicast discovery. The Google Compute Engine discovery plugin uses the GCE API for unicast discovery. -<>:: - -The multicast plugin sends multicast messages to discover other nodes in the cluster. - [float] ==== Community contributed discovery plugins @@ -41,5 +37,3 @@ include::discovery-azure.asciidoc[] include::discovery-gce.asciidoc[] -include::discovery-multicast.asciidoc[] - diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index e4cdae5a781..540f4c25f79 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -22,7 +22,7 @@ parameter to indicate the paths to the required metrics. The syntax for defining Pipeline aggregations cannot have sub-aggregations but depending on the type it can reference another pipeline in the `buckets_path` allowing pipeline aggregations to be chained. For example, you can chain together two derivatives to calculate the second derivative -(e.g. a derivative of a derivative). +(i.e. a derivative of a derivative). NOTE: Because pipeline aggregations only add to the output, when chaining pipeline aggregations the output of each pipeline aggregation will be included in the final output. diff --git a/docs/reference/ingest/ingest.asciidoc b/docs/reference/ingest/ingest.asciidoc index e1ce35eb23c..e9226e7d537 100644 --- a/docs/reference/ingest/ingest.asciidoc +++ b/docs/reference/ingest/ingest.asciidoc @@ -534,27 +534,6 @@ to the requester. } -------------------------------------------------- -==== DeDot Processor -The DeDot Processor is used to remove dots (".") from field names and -replace them with a specific `separator` string. - -[[dedot-options]] -.DeDot Options -[options="header"] -|====== -| Name | Required | Default | Description -| `separator` | yes | "_" | The string to replace dots with in all field names -|====== - -[source,js] --------------------------------------------------- -{ - "dedot": { - "separator": "_" - } -} --------------------------------------------------- - === Accessing data in pipelines @@ -725,7 +704,7 @@ the index for which failed documents get sent. Sometimes you may want to retrieve the actual error message that was thrown by a failed processor. To do so you can access metadata fields called -`on_failure_message` and `on_failure_processor`. These fields are only accessible +`on_failure_message`, `on_failure_processor_type`, `on_failure_processor_tag`. These fields are only accessible from within the context of an `on_failure` block. 
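
To make the three metadata fields concrete: the constants asserted on in CompoundProcessorTests earlier in this patch (`ON_FAILURE_MESSAGE_FIELD`, `ON_FAILURE_PROCESSOR_TYPE_FIELD`, `ON_FAILURE_PROCESSOR_TAG_FIELD`) map to the documented `on_failure_message`, `on_failure_processor_type`, and `on_failure_processor_tag` keys. Below is a minimal sketch of how a compound processor could expose them; the method and class names are assumptions, and since the docs say the fields are only visible inside an `on_failure` block, a real implementation would presumably also remove them once the handler finishes.

[source,java]
----
import java.util.Map;

final class OnFailureMetadataSketch {
    // Field names match the CompoundProcessor constants used in the tests.
    static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message";
    static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type";
    static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag";

    // Before running the on_failure chain, record which processor failed and
    // why; this is exactly what the updated tests assert against (size 3,
    // message, type, tag).
    static void putFailureMetadata(Map<String, Object> ingestMetadata,
                                   Exception cause, String processorType, String processorTag) {
        ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage());
        ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, processorType);
        ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, processorTag);
    }
}
----
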
Here is an updated version of our first example which leverages these fields to provide the error message instead of manually setting it. diff --git a/docs/reference/migration/migrate_2_0/network.asciidoc b/docs/reference/migration/migrate_2_0/network.asciidoc index 2f23c3f924a..d493bff5688 100644 --- a/docs/reference/migration/migrate_2_0/network.asciidoc +++ b/docs/reference/migration/migrate_2_0/network.asciidoc @@ -19,15 +19,11 @@ bin/elasticsearch --network.host _non_loopback_ The full list of options that network.host accepts can be found in the <>. -==== Multicast removed +==== Unicast discovery -Multicast has been removed (although it is still -{plugins}/discovery-multicast.html[provided as a plugin] for now). Instead, -and only when bound to localhost, Elasticsearch will use unicast to contact +When bound to localhost, Elasticsearch will use unicast to contact the first 5 ports in the `transport.tcp.port` range, which defaults to -`9300-9400`. - -This preserves the zero-config auto-clustering experience for the developer, +`9300-9400`. This preserves the zero-config auto-clustering experience for the developer, but it means that you will have to provide a list of <> when moving to production, for instance: diff --git a/docs/reference/migration/migrate_2_0/removals.asciidoc b/docs/reference/migration/migrate_2_0/removals.asciidoc index 55f76c6f30e..82e1cd923f6 100644 --- a/docs/reference/migration/migrate_2_0/removals.asciidoc +++ b/docs/reference/migration/migrate_2_0/removals.asciidoc @@ -58,8 +58,6 @@ still need to use multicast discovery, you can install the plugin with: ./bin/plugin install discovery-multicast ------------------ -See {plugins}/discovery-multicast.html for more information. - ==== `_shutdown` API The `_shutdown` API has been removed without a replacement. Nodes should be diff --git a/docs/reference/migration/migrate_2_0/settings.asciidoc b/docs/reference/migration/migrate_2_0/settings.asciidoc index 60f80b04e93..5e840ac3653 100644 --- a/docs/reference/migration/migrate_2_0/settings.asciidoc +++ b/docs/reference/migration/migrate_2_0/settings.asciidoc @@ -126,6 +126,10 @@ to prevent clashes with the watcher plugin * `watcher.interval.medium` is now `resource.reload.interval.medium` * `watcher.interval.high` is now `resource.reload.interval.high` +==== index.gateway setting renamed + +* `index.gateway.local.sync` is now `index.translog.sync_interval` + ==== Hunspell dictionary configuration The parameter `indices.analysis.hunspell.dictionary.location` has been diff --git a/docs/reference/migration/migrate_2_2.asciidoc b/docs/reference/migration/migrate_2_2.asciidoc index efb063f7c0f..39c059e7f47 100644 --- a/docs/reference/migration/migrate_2_2.asciidoc +++ b/docs/reference/migration/migrate_2_2.asciidoc @@ -48,3 +48,8 @@ Proxy settings have been deprecated and renamed: If you are using proxy settings, update your settings as deprecated ones will be removed in next major version. +[float] +=== Multicast plugin deprecated + +The `discovery-multicast` plugin has been deprecated in 2.2.0 and has +been removed in 3.0.0. diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index c76dec77399..0ea68ecacc8 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -166,7 +166,7 @@ with `_parent` field mapping created before version `2.0.0`. The data of these i The format of the join between parent and child documents have changed with the `2.0.0` release. 
The old format can't read from version `3.0.0` and onwards. The new format allows for a much more efficient and -scalable join between parent and child documents and the join data structures are stored on on disk +scalable join between parent and child documents and the join data structures are stored on disk data structures as opposed as before the join data structures were stored in the jvm heap space. ==== `score_type` has been removed @@ -319,6 +319,10 @@ disable doc values is by using the `doc_values` property of mappings. Site plugins have been removed. It is recommended to migrate site plugins to Kibana plugins. +==== Multicast plugin removed + +Multicast has been removed. Use unicast discovery, or one of the cloud discovery plugins. + ==== Plugins with custom query implementations Plugins implementing custom queries need to implement the `fromXContent(QueryParseContext)` method in their diff --git a/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/IngestGrokPlugin.java b/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/IngestGrokPlugin.java index 54800ac1603..9ccccadbff3 100644 --- a/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/IngestGrokPlugin.java +++ b/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/IngestGrokPlugin.java @@ -59,7 +59,7 @@ public class IngestGrokPlugin extends Plugin { nodeModule.registerProcessor(GrokProcessor.TYPE, (templateService) -> new GrokProcessor.Factory(builtinPatterns)); } - static Map loadBuiltinPatterns() throws IOException { + public static Map loadBuiltinPatterns() throws IOException { Map builtinPatterns = new HashMap<>(); for (String pattern : PATTERN_NAMES) { try(InputStream is = IngestGrokPlugin.class.getResourceAsStream("/patterns/" + pattern)) { diff --git a/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java b/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java index 1c36e26925d..db98090af39 100644 --- a/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java +++ b/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java @@ -19,9 +19,8 @@ package org.elasticsearch.ingest.grok; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; -import org.elasticsearch.ingest.core.Processor; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; import org.elasticsearch.test.ESTestCase; import java.util.Collections; @@ -54,7 +53,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("should fail"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); } @@ -67,7 +66,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("should fail"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[pattern] required property is missing")); } diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java index d48eed9e507..5a9b9f5f412 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java +++ 
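
The IngestGrokPlugin hunk above widens `loadBuiltinPatterns()` to public, presumably so other modules and tests can reuse the builtin pattern bank; the hunk shows the resources being opened from `/patterns/<name>` but not how each stream is parsed. A sketch of a plausible per-resource reader is below, assuming the conventional grok file format of one `NAME<space>regex` definition per line with `#` comments, which this diff does not show.

[source,java]
----
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

final class GrokPatternBankSketch {
    // Hypothetical reader for a single pattern resource; the file format is an
    // assumption based on the standard grok pattern convention.
    static Map<String, String> loadPatterns(InputStream is) throws IOException {
        Map<String, String> patterns = new HashMap<>();
        try (BufferedReader reader =
                 new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String trimmed = line.trim();
                if (trimmed.isEmpty() || trimmed.startsWith("#")) {
                    continue; // skip blank lines and comments
                }
                int space = trimmed.indexOf(' ');
                if (space > 0) {
                    // "PATTERN_NAME regex..." -> name/regex pair
                    patterns.put(trimmed.substring(0, space), trimmed.substring(space + 1));
                }
            }
        }
        return patterns;
    }
}
----
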
b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.azure.AzureDiscovery; @@ -79,29 +80,24 @@ public class AzureDiscoveryModule extends AbstractModule { return false; } - if (isPropertyMissing(settings, Management.SUBSCRIPTION_ID) || - isPropertyMissing(settings, Management.SERVICE_NAME) || - isPropertyMissing(settings, Management.KEYSTORE_PATH) || - isPropertyMissing(settings, Management.KEYSTORE_PASSWORD) - ) { - logger.debug("one or more azure discovery settings are missing. " + + if (isDefined(settings, Management.SUBSCRIPTION_ID_SETTING) && + isDefined(settings, Management.SERVICE_NAME_SETTING) && + isDefined(settings, Management.KEYSTORE_PATH_SETTING) && + isDefined(settings, Management.KEYSTORE_PASSWORD_SETTING)) { + logger.trace("All required properties for Azure discovery are set!"); + return true; + } else { + logger.debug("One or more Azure discovery settings are missing. " + "Check elasticsearch.yml file. Should have [{}], [{}], [{}] and [{}].", - Management.SUBSCRIPTION_ID, - Management.SERVICE_NAME, - Management.KEYSTORE_PATH, - Management.KEYSTORE_PASSWORD); + Management.SUBSCRIPTION_ID_SETTING.getKey(), + Management.SERVICE_NAME_SETTING.getKey(), + Management.KEYSTORE_PATH_SETTING.getKey(), + Management.KEYSTORE_PASSWORD_SETTING.getKey()); return false; } - - logger.trace("all required properties for azure discovery are set!"); - - return true; } - public static boolean isPropertyMissing(Settings settings, String name) throws ElasticsearchException { - if (!Strings.hasText(settings.get(name))) { - return true; - } - return false; + private static boolean isDefined(Settings settings, Setting property) throws ElasticsearchException { + return (property.exists(settings) && Strings.hasText(property.get(settings))); } } diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java index de2343d9d87..0c665c138b8 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -19,31 +19,25 @@ package org.elasticsearch.cloud.azure.management; +import com.microsoft.windowsazure.core.utils.KeyStoreType; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.azure.AzureUnicastHostsProvider; -import java.util.Locale; - -/** - * - */ public interface AzureComputeService { - static public final class Management { - public static final String API_IMPLEMENTATION = "cloud.azure.management.api.impl"; - - public static final String SUBSCRIPTION_ID = "cloud.azure.management.subscription.id"; - public static final String SERVICE_NAME = "cloud.azure.management.cloud.service.name"; + final class Management { + public static final Setting SUBSCRIPTION_ID_SETTING = 
Setting.simpleString("cloud.azure.management.subscription.id", false, Setting.Scope.CLUSTER); + public static final Setting SERVICE_NAME_SETTING = Setting.simpleString("cloud.azure.management.cloud.service.name", false, Setting.Scope.CLUSTER); // Keystore settings - public static final String KEYSTORE_PATH = "cloud.azure.management.keystore.path"; - public static final String KEYSTORE_PASSWORD = "cloud.azure.management.keystore.password"; - public static final String KEYSTORE_TYPE = "cloud.azure.management.keystore.type"; + public static final Setting KEYSTORE_PATH_SETTING = Setting.simpleString("cloud.azure.management.keystore.path", false, Setting.Scope.CLUSTER); + public static final Setting KEYSTORE_PASSWORD_SETTING = Setting.simpleString("cloud.azure.management.keystore.password", false, Setting.Scope.CLUSTER); + public static final Setting KEYSTORE_TYPE_SETTING = new Setting<>("cloud.azure.management.keystore.type", KeyStoreType.pkcs12.name(), KeyStoreType::fromString, false, Setting.Scope.CLUSTER); } - static public final class Discovery { + final class Discovery { public static final Setting REFRESH_SETTING = Setting.positiveTimeSetting("discovery.azure.refresh_interval", TimeValue.timeValueSeconds(0), false, Setting.Scope.CLUSTER); public static final Setting HOST_TYPE_SETTING = new Setting<>("discovery.azure.host.type", @@ -53,5 +47,6 @@ public interface AzureComputeService { public static final String DEPLOYMENT_NAME = "discovery.azure.deployment.name"; public static final String DEPLOYMENT_SLOT = "discovery.azure.deployment.slot"; } - public HostedServiceGetDetailedResponse getServiceDetails(); + + HostedServiceGetDetailedResponse getServiceDetails(); } diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java index 39221ee6904..04b4f32ea92 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java @@ -36,11 +36,6 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_PASSWORD; -import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_PATH; -import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.KEYSTORE_TYPE; -import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.SUBSCRIPTION_ID; - /** * */ @@ -57,20 +52,12 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent start one node"); @@ -53,7 +53,7 @@ public class AzureSimpleTests extends AbstractAzureComputeServiceTestCase { public void testOneNodeShouldRunUsingPublicIp() { Settings.Builder settings = Settings.settingsBuilder() - .put(Management.SERVICE_NAME, "dummy") + .put(Management.SERVICE_NAME_SETTING.getKey(), "dummy") .put(Discovery.HOST_TYPE_SETTING.getKey(), "public_ip"); logger.info("--> start one node"); @@ -66,7 +66,7 @@ public class AzureSimpleTests extends AbstractAzureComputeServiceTestCase { public void testOneNodeShouldRunUsingWrongSettings() { Settings.Builder settings = Settings.settingsBuilder() - .put(Management.SERVICE_NAME, "dummy") + .put(Management.SERVICE_NAME_SETTING.getKey(), "dummy") 
.put(Discovery.HOST_TYPE_SETTING.getKey(), "do_not_exist"); logger.info("--> start one node"); diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTests.java b/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTests.java index 880c05ed121..bb15ad050f0 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTests.java @@ -41,7 +41,7 @@ public class AzureTwoStartedNodesTests extends AbstractAzureComputeServiceTestCa @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/11533") public void testTwoNodesShouldRunUsingPrivateIp() { Settings.Builder settings = Settings.settingsBuilder() - .put(Management.SERVICE_NAME, "dummy") + .put(Management.SERVICE_NAME_SETTING.getKey(), "dummy") .put(Discovery.HOST_TYPE_SETTING.getKey(), "private_ip"); logger.info("--> start first node"); @@ -59,7 +59,7 @@ public class AzureTwoStartedNodesTests extends AbstractAzureComputeServiceTestCa @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/11533") public void testTwoNodesShouldRunUsingPublicIp() { Settings.Builder settings = Settings.settingsBuilder() - .put(Management.SERVICE_NAME, "dummy") + .put(Management.SERVICE_NAME_SETTING.getKey(), "dummy") .put(Discovery.HOST_TYPE_SETTING.getKey(), "public_ip"); logger.info("--> start first node"); diff --git a/plugins/discovery-multicast/build.gradle b/plugins/discovery-multicast/build.gradle deleted file mode 100644 index 295f28c094b..00000000000 --- a/plugins/discovery-multicast/build.gradle +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -esplugin { - description 'The Multicast Discovery plugin allows discovering other nodes using multicast requests' - classname 'org.elasticsearch.plugin.discovery.multicast.MulticastDiscoveryPlugin' -} diff --git a/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastChannel.java b/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastChannel.java deleted file mode 100644 index dee74b9ddce..00000000000 --- a/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastChannel.java +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership.
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugin.discovery.multicast; - -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; - -import java.io.Closeable; -import java.net.DatagramPacket; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.MulticastSocket; -import java.net.SocketAddress; -import java.net.SocketTimeoutException; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; - -/** - * A multicast channel that supports registering for receive events, and sending datagram packets. Allows - * to easily share the same multicast socket if it holds the same config. - */ -abstract class MulticastChannel implements Closeable { - - /** - * Builds a channel based on the provided config, allowing to control if sharing a channel that uses - * the same config is allowed or not. - */ - public static MulticastChannel getChannel(String name, boolean shared, Config config, Listener listener) throws Exception { - if (!shared) { - return new Plain(listener, name, config); - } - return Shared.getSharedChannel(listener, config); - } - - /** - * Config of multicast channel. - */ - public static final class Config { - public final int port; - public final String group; - public final int bufferSize; - public final int ttl; - public final InetAddress multicastInterface; - public final boolean deferToInterface; - - public Config(int port, String group, int bufferSize, int ttl, - InetAddress multicastInterface, boolean deferToInterface) { - this.port = port; - this.group = group; - this.bufferSize = bufferSize; - this.ttl = ttl; - this.multicastInterface = multicastInterface; - this.deferToInterface = deferToInterface; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Config config = (Config) o; - - if (bufferSize != config.bufferSize) return false; - if (port != config.port) return false; - if (ttl != config.ttl) return false; - if (group != null ? !group.equals(config.group) : config.group != null) return false; - if (multicastInterface != null ? !multicastInterface.equals(config.multicastInterface) : config.multicastInterface != null) - return false; - - return true; - } - - @Override - public int hashCode() { - int result = port; - result = 31 * result + (group != null ? group.hashCode() : 0); - result = 31 * result + bufferSize; - result = 31 * result + ttl; - result = 31 * result + (multicastInterface != null ? 
multicastInterface.hashCode() : 0); - return result; - } - } - - /** - * Listener that gets called when data is received on the multicast channel. - */ - public static interface Listener { - void onMessage(BytesReference data, SocketAddress address); - } - - /** - * Simple listener that wraps multiple listeners into one. - */ - public static class MultiListener implements Listener { - - private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); - - public void add(Listener listener) { - this.listeners.add(listener); - } - - public boolean remove(Listener listener) { - return this.listeners.remove(listener); - } - - @Override - public void onMessage(BytesReference data, SocketAddress address) { - for (Listener listener : listeners) { - listener.onMessage(data, address); - } - } - } - - protected final Listener listener; - private AtomicBoolean closed = new AtomicBoolean(); - - protected MulticastChannel(Listener listener) { - this.listener = listener; - } - - /** - * Send the data over the multicast channel. - */ - public abstract void send(BytesReference data) throws Exception; - - /** - * Close the channel. - */ - @Override - public void close() { - if (closed.compareAndSet(false, true)) { - close(listener); - } - } - - protected abstract void close(Listener listener); - - public static final String SHARED_CHANNEL_NAME = "#shared#"; - /** - * A shared channel that keeps a static map of Config -> Shared channels, and closes shared - * channel once their reference count has reached 0. It also handles de-registering relevant - * listener from the shared list of listeners. - */ - private final static class Shared extends MulticastChannel { - - private static final Map sharedChannels = new HashMap<>(); - private static final Object mutex = new Object(); // global mutex so we don't sync on static methods (.class) - - static MulticastChannel getSharedChannel(Listener listener, Config config) throws Exception { - - synchronized (mutex) { - Shared shared = sharedChannels.get(config); - if (shared != null) { - shared.incRef(); - ((MultiListener) shared.listener).add(listener); - } else { - MultiListener multiListener = new MultiListener(); - multiListener.add(listener); - shared = new Shared(multiListener, new Plain(multiListener, SHARED_CHANNEL_NAME, config)); - sharedChannels.put(config, shared); - } - return new Delegate(listener, shared); - } - } - - static void close(Shared shared, Listener listener) { - synchronized (mutex) { - // remove this - boolean removed = ((MultiListener) shared.listener).remove(listener); - assert removed : "a listener should be removed"; - if (shared.decRef() == 0) { - assert ((MultiListener) shared.listener).listeners.isEmpty(); - sharedChannels.remove(shared.channel.getConfig()); - shared.channel.close(); - } - } - } - - final Plain channel; - private int refCount = 1; - - Shared(MultiListener listener, Plain channel) { - super(listener); - this.channel = channel; - } - - private void incRef() { - refCount++; - } - - private int decRef() { - --refCount; - assert refCount >= 0 : "illegal ref counting, close called multiple times"; - return refCount; - } - - @Override - public void send(BytesReference data) throws Exception { - channel.send(data); - } - - @Override - public void close() { - assert false : "Shared references should never be closed directly, only via Delegate"; - } - - @Override - protected void close(Listener listener) { - close(this, listener); - } - } - - /** - * A light weight delegate that wraps another channel, mainly to support 
delegating - * the close method with the provided listener and not holding existing listener. - */ - private final static class Delegate extends MulticastChannel { - - private final MulticastChannel channel; - - Delegate(Listener listener, MulticastChannel channel) { - super(listener); - this.channel = channel; - } - - @Override - public void send(BytesReference data) throws Exception { - channel.send(data); - } - - @Override - protected void close(Listener listener) { - channel.close(listener); // we delegate here to the close with our listener, not with the delegate listener - } - } - - /** - * Simple implementation of a channel. - */ - @SuppressForbidden(reason = "I bind to wildcard addresses. I am a total nightmare") - private static class Plain extends MulticastChannel { - private final ESLogger logger; - private final Config config; - - private volatile MulticastSocket multicastSocket; - private final DatagramPacket datagramPacketSend; - private final DatagramPacket datagramPacketReceive; - - private final Object sendMutex = new Object(); - private final Object receiveMutex = new Object(); - - private final Receiver receiver; - private final Thread receiverThread; - - Plain(Listener listener, String name, Config config) throws Exception { - super(listener); - this.logger = ESLoggerFactory.getLogger(name); - this.config = config; - this.datagramPacketReceive = new DatagramPacket(new byte[config.bufferSize], config.bufferSize); - this.datagramPacketSend = new DatagramPacket(new byte[config.bufferSize], config.bufferSize, InetAddress.getByName(config.group), config.port); - this.multicastSocket = buildMulticastSocket(config); - this.receiver = new Receiver(); - this.receiverThread = daemonThreadFactory(Settings.builder().put("name", name).build(), "discovery#multicast#receiver").newThread(receiver); - this.receiverThread.start(); - } - - private MulticastSocket buildMulticastSocket(Config config) throws Exception { - SocketAddress addr = new InetSocketAddress(InetAddress.getByName(config.group), config.port); - MulticastSocket multicastSocket = new MulticastSocket(config.port); - try { - multicastSocket.setTimeToLive(config.ttl); - // OSX is not smart enough to tell that a socket bound to the - // 'lo0' interface needs to make sure to send the UDP packet - // out of the lo0 interface, so we need to do some special - // workarounds to fix it. 
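The OS X quirk called out in the comment above is the subtlest part of this deleted class. As a condensed sketch of what buildMulticastSocket did (the helper name joinMulticastGroup is illustrative, not from the original source), the two join strategies were:

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MulticastSocket;

// Sketch only: mirrors the join logic of the deleted buildMulticastSocket.
static MulticastSocket joinMulticastGroup(String group, int port, InetAddress iface,
                                          boolean deferToInterface) throws Exception {
    MulticastSocket socket = new MulticastSocket(port);
    InetAddress groupAddress = InetAddress.getByName(group);
    if (deferToInterface) {
        // OS X path: join with a null NetworkInterface, then pin the outgoing
        // interface explicitly so packets actually leave via the bound interface.
        socket.joinGroup(new InetSocketAddress(groupAddress, port), null);
        socket.setInterface(iface);
    } else {
        // Everywhere else: set the interface first, then join the group directly.
        socket.setInterface(iface);
        socket.joinGroup(groupAddress);
    }
    return socket;
}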
- if (config.deferToInterface) { - // 'null' here tells the socket to defer to the interface set - // with .setInterface - multicastSocket.joinGroup(addr, null); - multicastSocket.setInterface(config.multicastInterface); - } else { - multicastSocket.setInterface(config.multicastInterface); - multicastSocket.joinGroup(InetAddress.getByName(config.group)); - } - multicastSocket.setReceiveBufferSize(config.bufferSize); - multicastSocket.setSendBufferSize(config.bufferSize); - multicastSocket.setSoTimeout(60000); - } catch (Throwable e) { - IOUtils.closeWhileHandlingException(multicastSocket); - throw e; - } - return multicastSocket; - } - - public Config getConfig() { - return this.config; - } - - @Override - public void send(BytesReference data) throws Exception { - synchronized (sendMutex) { - datagramPacketSend.setData(data.toBytes()); - multicastSocket.send(datagramPacketSend); - } - } - - @Override - protected void close(Listener listener) { - receiver.stop(); - receiverThread.interrupt(); - if (multicastSocket != null) { - IOUtils.closeWhileHandlingException(multicastSocket); - multicastSocket = null; - } - try { - receiverThread.join(10000); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - private class Receiver implements Runnable { - - private volatile boolean running = true; - - public void stop() { - running = false; - } - - @Override - public void run() { - while (running) { - try { - synchronized (receiveMutex) { - try { - multicastSocket.receive(datagramPacketReceive); - } catch (SocketTimeoutException ignore) { - continue; - } catch (Exception e) { - if (running) { - if (multicastSocket.isClosed()) { - logger.warn("multicast socket closed while running, restarting..."); - multicastSocket = buildMulticastSocket(config); - } else { - logger.warn("failed to receive packet, throttling...", e); - Thread.sleep(500); - } - } - continue; - } - } - if (datagramPacketReceive.getData().length > 0) { - listener.onMessage(new BytesArray(datagramPacketReceive.getData()), datagramPacketReceive.getSocketAddress()); - } - } catch (Throwable e) { - if (running) { - logger.warn("unexpected exception in multicast receiver", e); - } - } - } - } - } - } -} diff --git a/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastDiscoveryPlugin.java b/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastDiscoveryPlugin.java deleted file mode 100644 index da9c5ba3c89..00000000000 --- a/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastDiscoveryPlugin.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.plugin.discovery.multicast; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.plugins.Plugin; - -public class MulticastDiscoveryPlugin extends Plugin { - - private final Settings settings; - - public MulticastDiscoveryPlugin(Settings settings) { - this.settings = settings; - } - - @Override - public String name() { - return "discovery-multicast"; - } - - @Override - public String description() { - return "Multicast Discovery Plugin"; - } - - public void onModule(DiscoveryModule module) { - if (settings.getAsBoolean("discovery.zen.ping.multicast.enabled", false)) { - module.addZenPing(MulticastZenPing.class); - } - } - - public void onModule(SettingsModule module) { - module.registerSetting(MulticastZenPing.ADDRESS_SETTING); - module.registerSetting(MulticastZenPing.GROUP_SETTING); - module.registerSetting(MulticastZenPing.PORT_SETTING); - module.registerSetting(MulticastZenPing.SHARED_SETTING); - module.registerSetting(MulticastZenPing.TTL_SETTING); - module.registerSetting(MulticastZenPing.BUFFER_SIZE_SETTING); - module.registerSetting(MulticastZenPing.PING_ENABLED_SETTING); - module.registerSetting(MulticastZenPing.DEFERE_TO_INTERFACE_SETTING); - } -} diff --git a/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastZenPing.java b/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastZenPing.java deleted file mode 100644 index 46f50235b58..00000000000 --- a/plugins/discovery-multicast/src/main/java/org/elasticsearch/plugin/discovery/multicast/MulticastZenPing.java +++ /dev/null @@ -1,604 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.plugin.discovery.multicast; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.SocketAddress; -import java.security.AccessController; -import java.security.PrivilegedExceptionAction; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; - -import org.apache.lucene.util.Constants; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.SpecialPermission; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.network.NetworkUtils; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.discovery.zen.ping.PingContextProvider; -import org.elasticsearch.discovery.zen.ping.ZenPing; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.EmptyTransportResponseHandler; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - -import static org.elasticsearch.cluster.node.DiscoveryNode.readNode; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; - -/** - * - */ -public class MulticastZenPing extends AbstractLifecycleComponent implements ZenPing { - - public static final String ACTION_NAME = "internal:discovery/zen/multicast"; - - private static final byte[] INTERNAL_HEADER = new byte[]{1, 9, 8, 4}; - - private static final int PING_SIZE_ESTIMATE = 150; - - private final String address; - private final int port; - private final String group; - private final int bufferSize; - private final int ttl; - - private final ThreadPool threadPool; - private final TransportService transportService; - private final ClusterName clusterName; - private final NetworkService networkService; - private final Version version; - private volatile 
PingContextProvider contextProvider; - - private final boolean pingEnabled; - - private volatile MulticastChannel multicastChannel; - - private final AtomicInteger pingIdGenerator = new AtomicInteger(); - private final Map receivedResponses = newConcurrentMap(); - public static final Setting ADDRESS_SETTING = Setting.simpleString("discovery.zen.ping.multicast.address", false, Setting.Scope.CLUSTER); - public static final Setting PORT_SETTING = Setting.intSetting("discovery.zen.ping.multicast.port", 54328, 0, (1<<16)-1, false, Setting.Scope.CLUSTER); - public static final Setting GROUP_SETTING = new Setting<>("discovery.zen.ping.multicast.group", "224.2.2.4", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting("discovery.zen.ping.multicast.buffer_size", new ByteSizeValue(2048, ByteSizeUnit.BYTES), false, Setting.Scope.CLUSTER); - public static final Setting TTL_SETTING = Setting.intSetting("discovery.zen.ping.multicast.ttl", 3, 0, 255, false, Setting.Scope.CLUSTER); - public static final Setting PING_ENABLED_SETTING = Setting.boolSetting("discovery.zen.ping.multicast.ping.enabled", true, false, Setting.Scope.CLUSTER); - public static final Setting SHARED_SETTING = Setting.boolSetting("discovery.zen.ping.multicast.shared", Constants.MAC_OS_X, false, Setting.Scope.CLUSTER); - public static final Setting DEFERE_TO_INTERFACE_SETTING = Setting.boolSetting("discovery.zen.ping.multicast.defer_group_to_set_interface", Constants.MAC_OS_X, false, Setting.Scope.CLUSTER); - - public MulticastZenPing(ThreadPool threadPool, TransportService transportService, ClusterName clusterName, Version version) { - this(EMPTY_SETTINGS, threadPool, transportService, clusterName, new NetworkService(EMPTY_SETTINGS), version); - } - - @Inject - public MulticastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService, Version version) { - super(settings); - this.threadPool = threadPool; - this.transportService = transportService; - this.clusterName = clusterName; - this.networkService = networkService; - this.version = version; - - this.address = ADDRESS_SETTING.exists(settings) ? 
ADDRESS_SETTING.get(settings) : null; - this.port = PORT_SETTING.get(settings); - this.group = GROUP_SETTING.get(settings); - this.bufferSize = BUFFER_SIZE_SETTING.get(settings).bytesAsInt(); - this.ttl = TTL_SETTING.get(settings); - this.pingEnabled = PING_ENABLED_SETTING.get(settings); - - logger.debug("using group [{}], with port [{}], ttl [{}], and address [{}]", group, port, ttl, address); - - this.transportService.registerRequestHandler(ACTION_NAME, MulticastPingResponse::new, ThreadPool.Names.SAME, new MulticastPingResponseRequestHandler()); - } - - @Override - public void setPingContextProvider(PingContextProvider nodesProvider) { - if (lifecycle.started()) { - throw new IllegalStateException("Can't set nodes provider when started"); - } - this.contextProvider = nodesProvider; - } - - @Override - protected void doStart() { - try { - // we know OSX has bugs in the JVM when creating multiple instances of multicast sockets - // causing for "socket close" exceptions when receive and/or crashes - boolean shared = SHARED_SETTING.get(settings); - // OSX does not correctly send multicasts FROM the right interface - boolean deferToInterface = DEFERE_TO_INTERFACE_SETTING.get(settings); - - final MulticastChannel.Config config = new MulticastChannel.Config(port, group, bufferSize, ttl, - getMulticastInterface(), deferToInterface); - SecurityManager sm = System.getSecurityManager(); - if (sm != null) { - sm.checkPermission(new SpecialPermission()); - } - multicastChannel = AccessController.doPrivileged(new PrivilegedExceptionAction() { - @Override - public MulticastChannel run() throws Exception { - return MulticastChannel.getChannel(nodeName(), shared, config, new Receiver()); - } - }); - } catch (Throwable t) { - String msg = "multicast failed to start [{}], disabling. Consider using IPv4 only (by defining env. variable `ES_USE_IPV4`)"; - logger.info(msg, t, ExceptionsHelper.detailedMessage(t)); - } - } - - - @SuppressWarnings("deprecation") // Used to support funky configuration options - private InetAddress getMulticastInterface() throws IOException { - // don't use publish address, the use case for that is e.g. a firewall or proxy and - // may not even be bound to an interface on this machine! use the first bound address. - List addresses = Arrays.asList(networkService.resolveBindHostAddresses(address == null ? 
null : new String[] { address })); - NetworkUtils.sortAddresses(addresses); - return addresses.get(0); - } - - @Override - protected void doStop() { - if (multicastChannel != null) { - multicastChannel.close(); - multicastChannel = null; - } - } - - @Override - protected void doClose() { - } - - public PingResponse[] pingAndWait(TimeValue timeout) { - final AtomicReference response = new AtomicReference<>(); - final CountDownLatch latch = new CountDownLatch(1); - try { - ping(new PingListener() { - @Override - public void onPing(PingResponse[] pings) { - response.set(pings); - latch.countDown(); - } - }, timeout); - } catch (EsRejectedExecutionException ex) { - logger.debug("Ping execution rejected", ex); - return PingResponse.EMPTY; - } - try { - latch.await(); - return response.get(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return PingResponse.EMPTY; - } - } - - @Override - public void ping(final PingListener listener, final TimeValue timeout) { - if (!pingEnabled || multicastChannel == null) { - threadPool.generic().execute(new Runnable() { - @Override - public void run() { - listener.onPing(PingResponse.EMPTY); - } - }); - return; - } - final int id = pingIdGenerator.incrementAndGet(); - try { - receivedResponses.put(id, new PingCollection()); - sendPingRequest(id); - // try and send another ping request halfway through (just in case someone woke up during it...) - // this can be a good trade-off to nailing the initial lookup or un-delivered messages - threadPool.schedule(TimeValue.timeValueMillis(timeout.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() { - @Override - public void onFailure(Throwable t) { - logger.warn("[{}] failed to send second ping request", t, id); - finalizePingCycle(id, listener); - } - - @Override - public void doRun() { - sendPingRequest(id); - threadPool.schedule(TimeValue.timeValueMillis(timeout.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() { - @Override - public void onFailure(Throwable t) { - logger.warn("[{}] failed to send third ping request", t, id); - finalizePingCycle(id, listener); - } - - @Override - public void doRun() { - // make one last ping, but finalize as soon as all nodes have responded or a timeout has passed - PingCollection collection = receivedResponses.get(id); - FinalizingPingCollection finalizingPingCollection = new FinalizingPingCollection(id, collection, collection.size(), listener); - receivedResponses.put(id, finalizingPingCollection); - logger.trace("[{}] sending last pings", id); - sendPingRequest(id); - threadPool.schedule(TimeValue.timeValueMillis(timeout.millis() / 4), ThreadPool.Names.GENERIC, new AbstractRunnable() { - @Override - public void onFailure(Throwable t) { - logger.warn("[{}] failed to finalize ping", t, id); - } - - @Override - protected void doRun() throws Exception { - finalizePingCycle(id, listener); - } - }); - } - }); - } - }); - } catch (Exception e) { - logger.warn("failed to ping", e); - finalizePingCycle(id, listener); - } - } - - /** - * takes all pings collected for a given id and passes them to the given listener. - * this method is safe to call multiple times as it is guaranteed to only finalize once.
- */ - private void finalizePingCycle(int id, final PingListener listener) { - PingCollection responses = receivedResponses.remove(id); - if (responses != null) { - listener.onPing(responses.toArray()); - } - } - - private void sendPingRequest(int id) { - try { - BytesStreamOutput out = new BytesStreamOutput(PING_SIZE_ESTIMATE); - out.writeBytes(INTERNAL_HEADER); - // TODO: change to min_required version! - Version.writeVersion(version, out); - out.writeInt(id); - clusterName.writeTo(out); - contextProvider.nodes().localNode().writeTo(out); - out.close(); - multicastChannel.send(out.bytes()); - if (logger.isTraceEnabled()) { - logger.trace("[{}] sending ping request", id); - } - } catch (Exception e) { - if (lifecycle.stoppedOrClosed()) { - return; - } - if (logger.isDebugEnabled()) { - logger.debug("failed to send multicast ping request", e); - } else { - logger.warn("failed to send multicast ping request: {}", ExceptionsHelper.detailedMessage(e)); - } - } - } - - class FinalizingPingCollection extends PingCollection { - final private PingCollection internalCollection; - final private int expectedResponses; - final private AtomicInteger responseCount; - final private PingListener listener; - final private int id; - - public FinalizingPingCollection(int id, PingCollection internalCollection, int expectedResponses, PingListener listener) { - this.id = id; - this.internalCollection = internalCollection; - this.expectedResponses = expectedResponses; - this.responseCount = new AtomicInteger(); - this.listener = listener; - } - - @Override - public synchronized boolean addPing(PingResponse ping) { - if (internalCollection.addPing(ping)) { - if (responseCount.incrementAndGet() >= expectedResponses) { - logger.trace("[{}] all nodes responded", id); - finish(); - } - return true; - } - return false; - } - - @Override - public synchronized void addPings(PingResponse[] pings) { - internalCollection.addPings(pings); - } - - @Override - public synchronized PingResponse[] toArray() { - return internalCollection.toArray(); - } - - void finish() { - // spawn another thread as we may be running on a network thread - threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Throwable t) { - logger.error("failed to call ping listener", t); - } - - @Override - protected void doRun() throws Exception { - finalizePingCycle(id, listener); - } - }); - } - } - - class MulticastPingResponseRequestHandler implements TransportRequestHandler { - @Override - public void messageReceived(MulticastPingResponse request, TransportChannel channel) throws Exception { - if (logger.isTraceEnabled()) { - logger.trace("[{}] received {}", request.id, request.pingResponse); - } - PingCollection responses = receivedResponses.get(request.id); - if (responses == null) { - logger.warn("received ping response {} with no matching id [{}]", request.pingResponse, request.id); - } else { - responses.addPing(request.pingResponse); - } - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - public static class MulticastPingResponse extends TransportRequest { - - int id; - - PingResponse pingResponse; - - public MulticastPingResponse() { - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readInt(); - pingResponse = PingResponse.readPingResponse(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeInt(id); - pingResponse.writeTo(out); - } - } - - - private class Receiver 
implements MulticastChannel.Listener { - - @Override - public void onMessage(BytesReference data, SocketAddress address) { - int id = -1; - DiscoveryNode requestingNodeX = null; - ClusterName clusterName = null; - - Map externalPingData = null; - XContentType xContentType = null; - - try { - boolean internal = false; - if (data.length() > 4) { - int counter = 0; - for (; counter < INTERNAL_HEADER.length; counter++) { - if (data.get(counter) != INTERNAL_HEADER[counter]) { - break; - } - } - if (counter == INTERNAL_HEADER.length) { - internal = true; - } - } - if (internal) { - StreamInput input = StreamInput.wrap(new BytesArray(data.toBytes(), INTERNAL_HEADER.length, data.length() - INTERNAL_HEADER.length)); - Version version = Version.readVersion(input); - input.setVersion(version); - id = input.readInt(); - clusterName = ClusterName.readClusterName(input); - requestingNodeX = readNode(input); - } else { - xContentType = XContentFactory.xContentType(data); - if (xContentType != null) { - // an external ping - try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(data)) { - externalPingData = parser.map(); - } - } else { - throw new IllegalStateException("failed multicast message, probably message from previous version"); - } - } - if (externalPingData != null) { - handleExternalPingRequest(externalPingData, xContentType, address); - } else { - handleNodePingRequest(id, requestingNodeX, clusterName); - } - } catch (Exception e) { - if (!lifecycle.started() || (e instanceof EsRejectedExecutionException)) { - logger.debug("failed to read requesting data from {}", e, address); - } else { - logger.warn("failed to read requesting data from {}", e, address); - } - } - } - - @SuppressWarnings("unchecked") - private void handleExternalPingRequest(Map externalPingData, XContentType contentType, SocketAddress remoteAddress) { - if (externalPingData.containsKey("response")) { - // ignoring responses sent over the multicast channel - logger.trace("got an external ping response (ignoring) from {}, content {}", remoteAddress, externalPingData); - return; - } - - if (multicastChannel == null) { - logger.debug("can't send ping response, no socket, from {}, content {}", remoteAddress, externalPingData); - return; - } - - Map request = (Map) externalPingData.get("request"); - if (request == null) { - logger.warn("malformed external ping request, no 'request' element from {}, content {}", remoteAddress, externalPingData); - return; - } - - final String requestClusterName = request.containsKey("cluster_name") ? request.get("cluster_name").toString() : request.containsKey("clusterName") ? 
request.get("clusterName").toString() : null; - if (requestClusterName == null) { - logger.warn("malformed external ping request, missing 'cluster_name' element within request, from {}, content {}", remoteAddress, externalPingData); - return; - } - - if (!requestClusterName.equals(clusterName.value())) { - logger.trace("got request for cluster_name {}, but our cluster_name is {}, from {}, content {}", - requestClusterName, clusterName.value(), remoteAddress, externalPingData); - return; - } - if (logger.isTraceEnabled()) { - logger.trace("got external ping request from {}, content {}", remoteAddress, externalPingData); - } - - try { - DiscoveryNode localNode = contextProvider.nodes().localNode(); - - XContentBuilder builder = XContentFactory.contentBuilder(contentType); - builder.startObject().startObject("response"); - builder.field("cluster_name", clusterName.value()); - builder.startObject("version").field("number", version.number()).field("snapshot_build", version.snapshot).endObject(); - builder.field("transport_address", localNode.address().toString()); - - if (contextProvider.nodeService() != null) { - for (Map.Entry attr : contextProvider.nodeService().attributes().entrySet()) { - builder.field(attr.getKey(), attr.getValue()); - } - } - - builder.startObject("attributes"); - for (ObjectObjectCursor attr : localNode.attributes()) { - builder.field(attr.key, attr.value); - } - builder.endObject(); - - builder.endObject().endObject(); - multicastChannel.send(builder.bytes()); - if (logger.isTraceEnabled()) { - logger.trace("sending external ping response {}", builder.string()); - } - } catch (Exception e) { - logger.warn("failed to send external multicast response", e); - } - } - - private void handleNodePingRequest(int id, DiscoveryNode requestingNodeX, ClusterName requestClusterName) { - if (!pingEnabled || multicastChannel == null) { - return; - } - final DiscoveryNodes discoveryNodes = contextProvider.nodes(); - final DiscoveryNode requestingNode = requestingNodeX; - if (requestingNode.id().equals(discoveryNodes.localNodeId())) { - // that's me, ignore - return; - } - if (!requestClusterName.equals(clusterName)) { - if (logger.isTraceEnabled()) { - logger.trace("[{}] received ping_request from [{}], but wrong cluster_name [{}], expected [{}], ignoring", - id, requestingNode, requestClusterName.value(), clusterName.value()); - } - return; - } - // don't connect between two client nodes, no need for that... 
- if (!discoveryNodes.localNode().shouldConnectTo(requestingNode)) { - if (logger.isTraceEnabled()) { - logger.trace("[{}] received ping_request from [{}], both are client nodes, ignoring", id, requestingNode, requestClusterName); - } - return; - } - final MulticastPingResponse multicastPingResponse = new MulticastPingResponse(); - multicastPingResponse.id = id; - multicastPingResponse.pingResponse = new PingResponse(discoveryNodes.localNode(), discoveryNodes.masterNode(), clusterName, contextProvider.nodeHasJoinedClusterOnce()); - - if (logger.isTraceEnabled()) { - logger.trace("[{}] received ping_request from [{}], sending {}", id, requestingNode, multicastPingResponse.pingResponse); - } - - if (!transportService.nodeConnected(requestingNode)) { - // do the connect and send on a thread pool - threadPool.generic().execute(new Runnable() { - @Override - public void run() { - // connect to the node if possible - try { - transportService.connectToNode(requestingNode); - transportService.sendRequest(requestingNode, ACTION_NAME, multicastPingResponse, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleException(TransportException exp) { - logger.warn("failed to receive confirmation on sent ping response to [{}]", exp, requestingNode); - } - }); - } catch (Exception e) { - if (lifecycle.started()) { - logger.warn("failed to connect to requesting node {}", e, requestingNode); - } - } - } - }); - } else { - transportService.sendRequest(requestingNode, ACTION_NAME, multicastPingResponse, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleException(TransportException exp) { - if (lifecycle.started()) { - logger.warn("failed to receive confirmation on sent ping response to [{}]", exp, requestingNode); - } - } - }); - } - } - } -} diff --git a/plugins/discovery-multicast/src/main/plugin-metadata/plugin-security.policy b/plugins/discovery-multicast/src/main/plugin-metadata/plugin-security.policy deleted file mode 100644 index 5752c86bb4f..00000000000 --- a/plugins/discovery-multicast/src/main/plugin-metadata/plugin-security.policy +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -grant { - // needed to bind multicast to arbitrary port - permission java.net.SocketPermission "localhost:1024-", "listen,resolve"; -}; diff --git a/plugins/discovery-multicast/src/test/java/org/elasticsearch/plugin/discovery/multicast/MulticastDiscoveryRestIT.java b/plugins/discovery-multicast/src/test/java/org/elasticsearch/plugin/discovery/multicast/MulticastDiscoveryRestIT.java deleted file mode 100644 index c6af20c011e..00000000000 --- a/plugins/discovery-multicast/src/test/java/org/elasticsearch/plugin/discovery/multicast/MulticastDiscoveryRestIT.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugin.discovery.multicast; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.test.rest.RestTestCandidate; -import org.elasticsearch.test.rest.parser.RestTestParseException; - -import java.io.IOException; - -public class MulticastDiscoveryRestIT extends ESRestTestCase { - - public MulticastDiscoveryRestIT(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return ESRestTestCase.createParameters(0, 1); - } -} - diff --git a/plugins/discovery-multicast/src/test/java/org/elasticsearch/plugin/discovery/multicast/MulticastZenPingTests.java b/plugins/discovery-multicast/src/test/java/org/elasticsearch/plugin/discovery/multicast/MulticastZenPingTests.java deleted file mode 100644 index 8c2d95ec799..00000000000 --- a/plugins/discovery-multicast/src/test/java/org/elasticsearch/plugin/discovery/multicast/MulticastZenPingTests.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.plugin.discovery.multicast; - -import org.apache.lucene.util.Constants; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.discovery.zen.ping.PingContextProvider; -import org.elasticsearch.discovery.zen.ping.ZenPing; -import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.local.LocalTransport; -import org.hamcrest.Matchers; -import org.junit.Assert; - -import java.net.DatagramPacket; -import java.net.InetAddress; -import java.net.MulticastSocket; - -public class MulticastZenPingTests extends ESTestCase { - - private Settings buildRandomMulticast(Settings settings) { - Settings.Builder builder = Settings.builder().put(settings); - builder.put("discovery.zen.ping.multicast.group", "224.2.3." + randomIntBetween(0, 255)); - builder.put("discovery.zen.ping.multicast.port", randomIntBetween(55000, 56000)); - builder.put("discovery.zen.ping.multicast.enabled", true); - if (randomBoolean()) { - builder.put("discovery.zen.ping.multicast.shared", randomBoolean()); - } - return builder.build(); - } - - public void testSimplePings() throws InterruptedException { - assumeTrue("https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=193246", Constants.FREE_BSD == false); - Settings settings = Settings.EMPTY; - settings = buildRandomMulticast(settings); - Thread.sleep(30000); - - ThreadPool threadPool = new ThreadPool("testSimplePings"); - final ClusterName clusterName = new ClusterName("test"); - final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()), threadPool).start(); - final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT); - - final TransportService transportServiceB = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()), threadPool).start(); - final DiscoveryNode nodeB = new DiscoveryNode("B", transportServiceB.boundAddress().publishAddress(), Version.CURRENT); - - MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT); - zenPingA.setPingContextProvider(new PingContextProvider() { - @Override - public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build(); - } - - @Override - public NodeService nodeService() { - return null; - } - - @Override - public boolean nodeHasJoinedClusterOnce() { - return false; - } - }); - zenPingA.start(); - - MulticastZenPing zenPingB = new MulticastZenPing(threadPool, transportServiceB, clusterName, Version.CURRENT); - zenPingB.setPingContextProvider(new PingContextProvider() { - @Override - public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(nodeB).localNodeId("B").build(); - } - - @Override - 
public NodeService nodeService() { - return null; - } - - @Override - public boolean nodeHasJoinedClusterOnce() { - return true; - } - }); - zenPingB.start(); - - try { - logger.info("ping from A"); - ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1)); - Assert.assertThat(pingResponses.length, Matchers.equalTo(1)); - Assert.assertThat(pingResponses[0].node().id(), Matchers.equalTo("B")); - Assert.assertTrue(pingResponses[0].hasJoinedOnce()); - - logger.info("ping from B"); - pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1)); - Assert.assertThat(pingResponses.length, Matchers.equalTo(1)); - Assert.assertThat(pingResponses[0].node().id(), Matchers.equalTo("A")); - Assert.assertFalse(pingResponses[0].hasJoinedOnce()); - - } finally { - zenPingA.close(); - zenPingB.close(); - transportServiceA.close(); - transportServiceB.close(); - terminate(threadPool); - } - } - - // This test is here because when running on FreeBSD, if no tests are - // executed for the 'multicast' project it will assume everything - // failed, so we need to have at least one test that runs. - public void testAlwaysRun() throws Exception { - assertTrue(true); - } - - @SuppressForbidden(reason = "I bind to wildcard addresses. I am a total nightmare") - public void testExternalPing() throws Exception { - assumeTrue("https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=193246", Constants.FREE_BSD == false); - Settings settings = Settings.EMPTY; - settings = buildRandomMulticast(settings); - - final ThreadPool threadPool = new ThreadPool("testExternalPing"); - final ClusterName clusterName = new ClusterName("test"); - final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()), threadPool).start(); - final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT); - - MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT); - zenPingA.setPingContextProvider(new PingContextProvider() { - @Override - public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build(); - } - - @Override - public NodeService nodeService() { - return null; - } - - @Override - public boolean nodeHasJoinedClusterOnce() { - return false; - } - }); - zenPingA.start(); - - MulticastSocket multicastSocket = null; - try { - Loggers.getLogger(MulticastZenPing.class).setLevel("TRACE"); - multicastSocket = new MulticastSocket(); - multicastSocket.setReceiveBufferSize(2048); - multicastSocket.setSendBufferSize(2048); - multicastSocket.setSoTimeout(60000); - - DatagramPacket datagramPacket = new DatagramPacket(new byte[2048], 2048, InetAddress.getByName("224.2.2.4"), 54328); - XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("request").field("cluster_name", "test").endObject().endObject(); - datagramPacket.setData(builder.bytes().toBytes()); - multicastSocket.send(datagramPacket); - Thread.sleep(100); - } finally { - Loggers.getLogger(MulticastZenPing.class).setLevel("INFO"); - if (multicastSocket != null) multicastSocket.close(); - zenPingA.close(); - terminate(threadPool); - } - } -} diff --git a/plugins/discovery-multicast/src/test/resources/rest-api-spec/test/discovery_multicast/10_basic.yaml b/plugins/discovery-multicast/src/test/resources/rest-api-spec/test/discovery_multicast/10_basic.yaml deleted file mode 100644 index 
36172fa2c33..00000000000 --- a/plugins/discovery-multicast/src/test/resources/rest-api-spec/test/discovery_multicast/10_basic.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Integration tests for multicast discovery -# -"Multicast discovery loaded": - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - - match: { nodes.$master.plugins.0.name: discovery-multicast } diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index dbcdbbc1a7d..0f51d82de75 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -29,14 +29,13 @@ import com.maxmind.geoip2.record.Country; import com.maxmind.geoip2.record.Location; import com.maxmind.geoip2.record.Subdivision; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.ingest.core.AbstractProcessor; import org.elasticsearch.ingest.core.AbstractProcessorFactory; import org.elasticsearch.ingest.core.IngestDocument; -import org.elasticsearch.ingest.core.Processor; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; import java.io.Closeable; import java.io.IOException; @@ -52,6 +51,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException; import static org.elasticsearch.ingest.core.ConfigurationUtils.readOptionalList; import static org.elasticsearch.ingest.core.ConfigurationUtils.readStringProperty; @@ -94,7 +94,7 @@ public final class GeoIpProcessor extends AbstractProcessor { } break; default: - throw new IllegalStateException("Unsupported database type [" + dbReader.getMetadata().getDatabaseType() + "]"); + throw new ElasticsearchParseException("Unsupported database type [" + dbReader.getMetadata().getDatabaseType() + "]", new IllegalStateException()); } ingestDocument.setFieldValue(targetField, geoData); } @@ -240,7 +240,7 @@ public final class GeoIpProcessor extends AbstractProcessor { try { fields.add(Field.parse(fieldName)); } catch (Exception e) { - throw new ConfigurationPropertyException(TYPE, processorTag, "fields", "illegal field option [" + fieldName + "]. valid values are [" + Arrays.toString(Field.values()) +"]"); + throw newConfigurationException(TYPE, processorTag, "fields", "illegal field option [" + fieldName + "]. 
valid values are [" + Arrays.toString(Field.values()) + "]"); } } } else { @@ -249,7 +249,7 @@ public final class GeoIpProcessor extends AbstractProcessor { DatabaseReader databaseReader = databaseReaders.get(databaseFile); if (databaseReader == null) { - throw new ConfigurationPropertyException(TYPE, processorTag, "database_file", "database file [" + databaseFile + "] doesn't exist"); + throw newConfigurationException(TYPE, processorTag, "database_file", "database file [" + databaseFile + "] doesn't exist"); } return new GeoIpProcessor(processorTag, ipField, databaseReader, targetField, fields); } diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index f92cb7b479f..570b1e2d18f 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -53,7 +53,7 @@ public class IngestGeoIpPlugin extends Plugin { nodeModule.registerProcessor(GeoIpProcessor.TYPE, (templateService) -> new GeoIpProcessor.Factory(databaseReaders)); } - static Map loadDatabaseReaders(Path geoIpConfigDirectory) throws IOException { + public static Map loadDatabaseReaders(Path geoIpConfigDirectory) throws IOException { if (Files.exists(geoIpConfigDirectory) == false && Files.isDirectory(geoIpConfigDirectory)) { throw new IllegalStateException("the geoip directory [" + geoIpConfigDirectory + "] containing databases doesn't exist"); } diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 410f6e343f7..13143a09651 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -20,9 +20,8 @@ package org.elasticsearch.ingest.geoip; import com.maxmind.geoip2.DatabaseReader; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; -import org.elasticsearch.ingest.core.Processor; -import org.elasticsearch.ingest.processor.ConfigurationPropertyException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; import org.junit.AfterClass; @@ -113,7 +112,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("Exception expected"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[database_file] database file [does-not-exist.mmdb] doesn't exist")); } } @@ -146,7 +145,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("exception expected"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[fields] illegal field option [invalid]. 
valid values are [[IP, COUNTRY_ISO_CODE, COUNTRY_NAME, CONTINENT_NAME, REGION_NAME, CITY_NAME, TIMEZONE, LATITUDE, LONGITUDE, LOCATION]]")); } @@ -156,7 +155,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { try { factory.create(config); fail("exception expected"); - } catch (ConfigurationPropertyException e) { + } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[fields] property isn't a list, but of type [java.lang.String]")); } } diff --git a/plugins/lang-painless/src/main/antlr/PainlessLexer.g4 b/plugins/lang-painless/src/main/antlr/PainlessLexer.g4 index 11cd97cc9e3..866bbd752c8 100644 --- a/plugins/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/plugins/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -96,7 +96,6 @@ AOR: '|='; ALSH: '<<='; ARSH: '>>='; AUSH: '>>>='; -ACAT: '..='; OCTAL: '0' [0-7]+ [lL]?; HEX: '0' [xX] [0-9a-fA-F]+ [lL]?; diff --git a/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessLexer.java b/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessLexer.java index 3a01626d872..a7cf506da0d 100644 --- a/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessLexer.java +++ b/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessLexer.java @@ -30,9 +30,8 @@ class PainlessLexer extends Lexer { LTE=35, GT=36, GTE=37, EQ=38, EQR=39, NE=40, NER=41, BWAND=42, BWXOR=43, BWOR=44, BOOLAND=45, BOOLOR=46, COND=47, COLON=48, INCR=49, DECR=50, ASSIGN=51, AADD=52, ASUB=53, AMUL=54, ADIV=55, AREM=56, AAND=57, AXOR=58, AOR=59, - ALSH=60, ARSH=61, AUSH=62, ACAT=63, OCTAL=64, HEX=65, INTEGER=66, DECIMAL=67, - STRING=68, CHAR=69, TRUE=70, FALSE=71, NULL=72, TYPE=73, ID=74, EXTINTEGER=75, - EXTID=76; + ALSH=60, ARSH=61, AUSH=62, OCTAL=63, HEX=64, INTEGER=65, DECIMAL=66, STRING=67, + CHAR=68, TRUE=69, FALSE=70, NULL=71, TYPE=72, ID=73, EXTINTEGER=74, EXTID=75; public static final int EXT = 1; public static String[] modeNames = { "DEFAULT_MODE", "EXT" @@ -45,9 +44,9 @@ class PainlessLexer extends Lexer { "MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", "BWXOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", - "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "ACAT", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", - "NULL", "TYPE", "GENERIC", "ID", "EXTINTEGER", "EXTID" + "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", + "HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", "NULL", + "TYPE", "GENERIC", "ID", "EXTINTEGER", "EXTID" }; private static final String[] _LITERAL_NAMES = { @@ -57,8 +56,8 @@ class PainlessLexer extends Lexer { "'/'", "'%'", "'+'", "'-'", "'<<'", "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='", - "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", "'..='", null, - null, null, null, null, null, "'true'", "'false'", "'null'" + "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, + null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", @@ -67,9 +66,9 @@ class PainlessLexer extends Lexer { "MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", 
"NE", "NER", "BWAND", "BWXOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", - "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "ACAT", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", - "NULL", "TYPE", "ID", "EXTINTEGER", "EXTID" + "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", + "HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", "NULL", + "TYPE", "ID", "EXTINTEGER", "EXTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -135,13 +134,13 @@ class PainlessLexer extends Lexer { @Override public void action(RuleContext _localctx, int ruleIndex, int actionIndex) { switch (ruleIndex) { - case 67: + case 66: STRING_action((RuleContext)_localctx, actionIndex); break; - case 68: + case 67: CHAR_action((RuleContext)_localctx, actionIndex); break; - case 72: + case 71: TYPE_action((RuleContext)_localctx, actionIndex); break; } @@ -170,7 +169,7 @@ class PainlessLexer extends Lexer { @Override public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { - case 72: + case 71: return TYPE_sempred((RuleContext)_localctx, predIndex); } return true; @@ -184,7 +183,7 @@ class PainlessLexer extends Lexer { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2N\u0236\b\1\b\1\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2M\u0230\b\1\b\1\4"+ "\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+ "\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+ "\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+ @@ -193,195 +192,193 @@ class PainlessLexer extends Lexer { "+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64"+ "\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t"+ "=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4"+ - "I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\3\2\6\2\u00a0\n\2\r\2\16\2\u00a1\3"+ - "\2\3\2\3\3\3\3\3\3\3\3\7\3\u00aa\n\3\f\3\16\3\u00ad\13\3\3\3\3\3\3\3\3"+ - "\3\3\3\7\3\u00b4\n\3\f\3\16\3\u00b7\13\3\3\3\3\3\5\3\u00bb\n\3\3\3\3\3"+ - "\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\n\3\n\3\13"+ - "\3\13\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17"+ - "\3\17\3\17\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22"+ - "\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24"+ - "\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\27\3\27\3\27"+ - "\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\32\3\32\3\33"+ - "\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3!\3!\3\"\3"+ - "\"\3\"\3\"\3#\3#\3$\3$\3$\3%\3%\3&\3&\3&\3\'\3\'\3\'\3(\3(\3(\3(\3)\3"+ - ")\3)\3*\3*\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3.\3/\3/\3/\3\60\3\60\3\61\3"+ - "\61\3\62\3\62\3\62\3\63\3\63\3\63\3\64\3\64\3\65\3\65\3\65\3\66\3\66\3"+ - "\66\3\67\3\67\3\67\38\38\38\39\39\39\3:\3:\3:\3;\3;\3;\3<\3<\3<\3=\3="+ - "\3=\3=\3>\3>\3>\3>\3?\3?\3?\3?\3?\3@\3@\3@\3@\3A\3A\6A\u0185\nA\rA\16"+ - "A\u0186\3A\5A\u018a\nA\3B\3B\3B\6B\u018f\nB\rB\16B\u0190\3B\5B\u0194\n"+ - "B\3C\3C\3C\7C\u0199\nC\fC\16C\u019c\13C\5C\u019e\nC\3C\5C\u01a1\nC\3D"+ - "\3D\3D\7D\u01a6\nD\fD\16D\u01a9\13D\5D\u01ab\nD\3D\3D\7D\u01af\nD\fD\16"+ - "D\u01b2\13D\3D\3D\5D\u01b6\nD\3D\6D\u01b9\nD\rD\16D\u01ba\5D\u01bd\nD"+ - "\3D\5D\u01c0\nD\3E\3E\3E\3E\3E\3E\7E\u01c8\nE\fE\16E\u01cb\13E\3E\3E\3"+ - 
"E\3F\3F\3F\3F\3F\3G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3J\3"+ - "J\5J\u01e7\nJ\3J\3J\3J\3K\7K\u01ed\nK\fK\16K\u01f0\13K\3K\3K\7K\u01f4"+ - "\nK\fK\16K\u01f7\13K\3K\3K\5K\u01fb\nK\3K\7K\u01fe\nK\fK\16K\u0201\13"+ - "K\3K\3K\7K\u0205\nK\fK\16K\u0208\13K\3K\3K\5K\u020c\nK\3K\7K\u020f\nK"+ - "\fK\16K\u0212\13K\7K\u0214\nK\fK\16K\u0217\13K\3K\3K\3L\3L\7L\u021d\n"+ - "L\fL\16L\u0220\13L\3M\3M\3M\7M\u0225\nM\fM\16M\u0228\13M\5M\u022a\nM\3"+ - "M\3M\3N\3N\7N\u0230\nN\fN\16N\u0233\13N\3N\3N\5\u00ab\u00b5\u01c9\2O\4"+ - "\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24\13\26\f\30\r\32\16\34\17\36\20 \21"+ - "\"\22$\23&\24(\25*\26,\27.\30\60\31\62\32\64\33\66\348\35:\36<\37> @!"+ - "B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64h\65j\66l\67n8p9r:"+ - "t;v|?~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008aF\u008cG\u008eH\u0090"+ - "I\u0092J\u0094K\u0096\2\u0098L\u009aM\u009cN\4\2\3\21\5\2\13\f\17\17\""+ - "\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b"+ - "\2FFHHNNffhhnn\4\2GGgg\4\2--//\4\2HHhh\4\2$$^^\5\2C\\aac|\6\2\62;C\\a"+ - "ac|\u0255\2\4\3\2\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2"+ - "\2\16\3\2\2\2\2\20\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30"+ - "\3\2\2\2\2\32\3\2\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2"+ - "\2$\3\2\2\2\2&\3\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60"+ - "\3\2\2\2\2\62\3\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2"+ - "\2<\3\2\2\2\2>\3\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H"+ - "\3\2\2\2\2J\3\2\2\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2"+ - "\2\2\2V\3\2\2\2\2X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2"+ - "\2\2b\3\2\2\2\2d\3\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2"+ - "n\3\2\2\2\2p\3\2\2\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3"+ - "\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3"+ - "\2\2\2\2\u0086\3\2\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2"+ - "\2\u008e\3\2\2\2\2\u0090\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0098"+ - "\3\2\2\2\3\u009a\3\2\2\2\3\u009c\3\2\2\2\4\u009f\3\2\2\2\6\u00ba\3\2\2"+ - "\2\b\u00be\3\2\2\2\n\u00c0\3\2\2\2\f\u00c2\3\2\2\2\16\u00c4\3\2\2\2\20"+ - "\u00c6\3\2\2\2\22\u00c8\3\2\2\2\24\u00ca\3\2\2\2\26\u00ce\3\2\2\2\30\u00d0"+ - "\3\2\2\2\32\u00d2\3\2\2\2\34\u00d5\3\2\2\2\36\u00da\3\2\2\2 \u00e0\3\2"+ - "\2\2\"\u00e3\3\2\2\2$\u00e7\3\2\2\2&\u00f0\3\2\2\2(\u00f6\3\2\2\2*\u00fd"+ - "\3\2\2\2,\u0101\3\2\2\2.\u0105\3\2\2\2\60\u010b\3\2\2\2\62\u0111\3\2\2"+ - "\2\64\u0113\3\2\2\2\66\u0115\3\2\2\28\u0117\3\2\2\2:\u0119\3\2\2\2<\u011b"+ - "\3\2\2\2>\u011d\3\2\2\2@\u011f\3\2\2\2B\u0122\3\2\2\2D\u0125\3\2\2\2F"+ - "\u0129\3\2\2\2H\u012b\3\2\2\2J\u012e\3\2\2\2L\u0130\3\2\2\2N\u0133\3\2"+ - "\2\2P\u0136\3\2\2\2R\u013a\3\2\2\2T\u013d\3\2\2\2V\u0141\3\2\2\2X\u0143"+ - "\3\2\2\2Z\u0145\3\2\2\2\\\u0147\3\2\2\2^\u014a\3\2\2\2`\u014d\3\2\2\2"+ - "b\u014f\3\2\2\2d\u0151\3\2\2\2f\u0154\3\2\2\2h\u0157\3\2\2\2j\u0159\3"+ - "\2\2\2l\u015c\3\2\2\2n\u015f\3\2\2\2p\u0162\3\2\2\2r\u0165\3\2\2\2t\u0168"+ - "\3\2\2\2v\u016b\3\2\2\2x\u016e\3\2\2\2z\u0171\3\2\2\2|\u0175\3\2\2\2~"+ - "\u0179\3\2\2\2\u0080\u017e\3\2\2\2\u0082\u0182\3\2\2\2\u0084\u018b\3\2"+ - "\2\2\u0086\u019d\3\2\2\2\u0088\u01aa\3\2\2\2\u008a\u01c1\3\2\2\2\u008c"+ - "\u01cf\3\2\2\2\u008e\u01d4\3\2\2\2\u0090\u01d9\3\2\2\2\u0092\u01df\3\2"+ - "\2\2\u0094\u01e4\3\2\2\2\u0096\u01ee\3\2\2\2\u0098\u021a\3\2\2\2\u009a"+ - "\u0229\3\2\2\2\u009c\u022d\3\2\2\2\u009e\u00a0\t\2\2\2\u009f\u009e\3\2"+ - 
"\2\2\u00a0\u00a1\3\2\2\2\u00a1\u009f\3\2\2\2\u00a1\u00a2\3\2\2\2\u00a2"+ - "\u00a3\3\2\2\2\u00a3\u00a4\b\2\2\2\u00a4\5\3\2\2\2\u00a5\u00a6\7\61\2"+ - "\2\u00a6\u00a7\7\61\2\2\u00a7\u00ab\3\2\2\2\u00a8\u00aa\13\2\2\2\u00a9"+ - "\u00a8\3\2\2\2\u00aa\u00ad\3\2\2\2\u00ab\u00ac\3\2\2\2\u00ab\u00a9\3\2"+ - "\2\2\u00ac\u00ae\3\2\2\2\u00ad\u00ab\3\2\2\2\u00ae\u00bb\t\3\2\2\u00af"+ - "\u00b0\7\61\2\2\u00b0\u00b1\7,\2\2\u00b1\u00b5\3\2\2\2\u00b2\u00b4\13"+ - "\2\2\2\u00b3\u00b2\3\2\2\2\u00b4\u00b7\3\2\2\2\u00b5\u00b6\3\2\2\2\u00b5"+ - "\u00b3\3\2\2\2\u00b6\u00b8\3\2\2\2\u00b7\u00b5\3\2\2\2\u00b8\u00b9\7,"+ - "\2\2\u00b9\u00bb\7\61\2\2\u00ba\u00a5\3\2\2\2\u00ba\u00af\3\2\2\2\u00bb"+ - "\u00bc\3\2\2\2\u00bc\u00bd\b\3\2\2\u00bd\7\3\2\2\2\u00be\u00bf\7}\2\2"+ - "\u00bf\t\3\2\2\2\u00c0\u00c1\7\177\2\2\u00c1\13\3\2\2\2\u00c2\u00c3\7"+ - "]\2\2\u00c3\r\3\2\2\2\u00c4\u00c5\7_\2\2\u00c5\17\3\2\2\2\u00c6\u00c7"+ - "\7*\2\2\u00c7\21\3\2\2\2\u00c8\u00c9\7+\2\2\u00c9\23\3\2\2\2\u00ca\u00cb"+ - "\7\60\2\2\u00cb\u00cc\3\2\2\2\u00cc\u00cd\b\n\3\2\u00cd\25\3\2\2\2\u00ce"+ - "\u00cf\7.\2\2\u00cf\27\3\2\2\2\u00d0\u00d1\7=\2\2\u00d1\31\3\2\2\2\u00d2"+ - "\u00d3\7k\2\2\u00d3\u00d4\7h\2\2\u00d4\33\3\2\2\2\u00d5\u00d6\7g\2\2\u00d6"+ - "\u00d7\7n\2\2\u00d7\u00d8\7u\2\2\u00d8\u00d9\7g\2\2\u00d9\35\3\2\2\2\u00da"+ - "\u00db\7y\2\2\u00db\u00dc\7j\2\2\u00dc\u00dd\7k\2\2\u00dd\u00de\7n\2\2"+ - "\u00de\u00df\7g\2\2\u00df\37\3\2\2\2\u00e0\u00e1\7f\2\2\u00e1\u00e2\7"+ - "q\2\2\u00e2!\3\2\2\2\u00e3\u00e4\7h\2\2\u00e4\u00e5\7q\2\2\u00e5\u00e6"+ - "\7t\2\2\u00e6#\3\2\2\2\u00e7\u00e8\7e\2\2\u00e8\u00e9\7q\2\2\u00e9\u00ea"+ - "\7p\2\2\u00ea\u00eb\7v\2\2\u00eb\u00ec\7k\2\2\u00ec\u00ed\7p\2\2\u00ed"+ - "\u00ee\7w\2\2\u00ee\u00ef\7g\2\2\u00ef%\3\2\2\2\u00f0\u00f1\7d\2\2\u00f1"+ - "\u00f2\7t\2\2\u00f2\u00f3\7g\2\2\u00f3\u00f4\7c\2\2\u00f4\u00f5\7m\2\2"+ - "\u00f5\'\3\2\2\2\u00f6\u00f7\7t\2\2\u00f7\u00f8\7g\2\2\u00f8\u00f9\7v"+ - "\2\2\u00f9\u00fa\7w\2\2\u00fa\u00fb\7t\2\2\u00fb\u00fc\7p\2\2\u00fc)\3"+ - "\2\2\2\u00fd\u00fe\7p\2\2\u00fe\u00ff\7g\2\2\u00ff\u0100\7y\2\2\u0100"+ - "+\3\2\2\2\u0101\u0102\7v\2\2\u0102\u0103\7t\2\2\u0103\u0104\7{\2\2\u0104"+ - "-\3\2\2\2\u0105\u0106\7e\2\2\u0106\u0107\7c\2\2\u0107\u0108\7v\2\2\u0108"+ - "\u0109\7e\2\2\u0109\u010a\7j\2\2\u010a/\3\2\2\2\u010b\u010c\7v\2\2\u010c"+ - "\u010d\7j\2\2\u010d\u010e\7t\2\2\u010e\u010f\7q\2\2\u010f\u0110\7y\2\2"+ - "\u0110\61\3\2\2\2\u0111\u0112\7#\2\2\u0112\63\3\2\2\2\u0113\u0114\7\u0080"+ - "\2\2\u0114\65\3\2\2\2\u0115\u0116\7,\2\2\u0116\67\3\2\2\2\u0117\u0118"+ - "\7\61\2\2\u01189\3\2\2\2\u0119\u011a\7\'\2\2\u011a;\3\2\2\2\u011b\u011c"+ - "\7-\2\2\u011c=\3\2\2\2\u011d\u011e\7/\2\2\u011e?\3\2\2\2\u011f\u0120\7"+ - ">\2\2\u0120\u0121\7>\2\2\u0121A\3\2\2\2\u0122\u0123\7@\2\2\u0123\u0124"+ - "\7@\2\2\u0124C\3\2\2\2\u0125\u0126\7@\2\2\u0126\u0127\7@\2\2\u0127\u0128"+ - "\7@\2\2\u0128E\3\2\2\2\u0129\u012a\7>\2\2\u012aG\3\2\2\2\u012b\u012c\7"+ - ">\2\2\u012c\u012d\7?\2\2\u012dI\3\2\2\2\u012e\u012f\7@\2\2\u012fK\3\2"+ - "\2\2\u0130\u0131\7@\2\2\u0131\u0132\7?\2\2\u0132M\3\2\2\2\u0133\u0134"+ - "\7?\2\2\u0134\u0135\7?\2\2\u0135O\3\2\2\2\u0136\u0137\7?\2\2\u0137\u0138"+ - "\7?\2\2\u0138\u0139\7?\2\2\u0139Q\3\2\2\2\u013a\u013b\7#\2\2\u013b\u013c"+ - "\7?\2\2\u013cS\3\2\2\2\u013d\u013e\7#\2\2\u013e\u013f\7?\2\2\u013f\u0140"+ - "\7?\2\2\u0140U\3\2\2\2\u0141\u0142\7(\2\2\u0142W\3\2\2\2\u0143\u0144\7"+ - "`\2\2\u0144Y\3\2\2\2\u0145\u0146\7~\2\2\u0146[\3\2\2\2\u0147\u0148\7("+ - "\2\2\u0148\u0149\7(\2\2\u0149]\3\2\2\2\u014a\u014b\7~\2\2\u014b\u014c"+ - 
"\7~\2\2\u014c_\3\2\2\2\u014d\u014e\7A\2\2\u014ea\3\2\2\2\u014f\u0150\7"+ - "<\2\2\u0150c\3\2\2\2\u0151\u0152\7-\2\2\u0152\u0153\7-\2\2\u0153e\3\2"+ - "\2\2\u0154\u0155\7/\2\2\u0155\u0156\7/\2\2\u0156g\3\2\2\2\u0157\u0158"+ - "\7?\2\2\u0158i\3\2\2\2\u0159\u015a\7-\2\2\u015a\u015b\7?\2\2\u015bk\3"+ - "\2\2\2\u015c\u015d\7/\2\2\u015d\u015e\7?\2\2\u015em\3\2\2\2\u015f\u0160"+ - "\7,\2\2\u0160\u0161\7?\2\2\u0161o\3\2\2\2\u0162\u0163\7\61\2\2\u0163\u0164"+ - "\7?\2\2\u0164q\3\2\2\2\u0165\u0166\7\'\2\2\u0166\u0167\7?\2\2\u0167s\3"+ - "\2\2\2\u0168\u0169\7(\2\2\u0169\u016a\7?\2\2\u016au\3\2\2\2\u016b\u016c"+ - "\7`\2\2\u016c\u016d\7?\2\2\u016dw\3\2\2\2\u016e\u016f\7~\2\2\u016f\u0170"+ - "\7?\2\2\u0170y\3\2\2\2\u0171\u0172\7>\2\2\u0172\u0173\7>\2\2\u0173\u0174"+ - "\7?\2\2\u0174{\3\2\2\2\u0175\u0176\7@\2\2\u0176\u0177\7@\2\2\u0177\u0178"+ - "\7?\2\2\u0178}\3\2\2\2\u0179\u017a\7@\2\2\u017a\u017b\7@\2\2\u017b\u017c"+ - "\7@\2\2\u017c\u017d\7?\2\2\u017d\177\3\2\2\2\u017e\u017f\7\60\2\2\u017f"+ - "\u0180\7\60\2\2\u0180\u0181\7?\2\2\u0181\u0081\3\2\2\2\u0182\u0184\7\62"+ - "\2\2\u0183\u0185\t\4\2\2\u0184\u0183\3\2\2\2\u0185\u0186\3\2\2\2\u0186"+ - "\u0184\3\2\2\2\u0186\u0187\3\2\2\2\u0187\u0189\3\2\2\2\u0188\u018a\t\5"+ - "\2\2\u0189\u0188\3\2\2\2\u0189\u018a\3\2\2\2\u018a\u0083\3\2\2\2\u018b"+ - "\u018c\7\62\2\2\u018c\u018e\t\6\2\2\u018d\u018f\t\7\2\2\u018e\u018d\3"+ - "\2\2\2\u018f\u0190\3\2\2\2\u0190\u018e\3\2\2\2\u0190\u0191\3\2\2\2\u0191"+ - "\u0193\3\2\2\2\u0192\u0194\t\5\2\2\u0193\u0192\3\2\2\2\u0193\u0194\3\2"+ - "\2\2\u0194\u0085\3\2\2\2\u0195\u019e\7\62\2\2\u0196\u019a\t\b\2\2\u0197"+ - "\u0199\t\t\2\2\u0198\u0197\3\2\2\2\u0199\u019c\3\2\2\2\u019a\u0198\3\2"+ - "\2\2\u019a\u019b\3\2\2\2\u019b\u019e\3\2\2\2\u019c\u019a\3\2\2\2\u019d"+ - "\u0195\3\2\2\2\u019d\u0196\3\2\2\2\u019e\u01a0\3\2\2\2\u019f\u01a1\t\n"+ - "\2\2\u01a0\u019f\3\2\2\2\u01a0\u01a1\3\2\2\2\u01a1\u0087\3\2\2\2\u01a2"+ - "\u01ab\7\62\2\2\u01a3\u01a7\t\b\2\2\u01a4\u01a6\t\t\2\2\u01a5\u01a4\3"+ - "\2\2\2\u01a6\u01a9\3\2\2\2\u01a7\u01a5\3\2\2\2\u01a7\u01a8\3\2\2\2\u01a8"+ - "\u01ab\3\2\2\2\u01a9\u01a7\3\2\2\2\u01aa\u01a2\3\2\2\2\u01aa\u01a3\3\2"+ - "\2\2\u01ab\u01ac\3\2\2\2\u01ac\u01b0\5\24\n\2\u01ad\u01af\t\t\2\2\u01ae"+ - "\u01ad\3\2\2\2\u01af\u01b2\3\2\2\2\u01b0\u01ae\3\2\2\2\u01b0\u01b1\3\2"+ - "\2\2\u01b1\u01bc\3\2\2\2\u01b2\u01b0\3\2\2\2\u01b3\u01b5\t\13\2\2\u01b4"+ - "\u01b6\t\f\2\2\u01b5\u01b4\3\2\2\2\u01b5\u01b6\3\2\2\2\u01b6\u01b8\3\2"+ - "\2\2\u01b7\u01b9\t\t\2\2\u01b8\u01b7\3\2\2\2\u01b9\u01ba\3\2\2\2\u01ba"+ - "\u01b8\3\2\2\2\u01ba\u01bb\3\2\2\2\u01bb\u01bd\3\2\2\2\u01bc\u01b3\3\2"+ - "\2\2\u01bc\u01bd\3\2\2\2\u01bd\u01bf\3\2\2\2\u01be\u01c0\t\r\2\2\u01bf"+ - "\u01be\3\2\2\2\u01bf\u01c0\3\2\2\2\u01c0\u0089\3\2\2\2\u01c1\u01c9\7$"+ - "\2\2\u01c2\u01c3\7^\2\2\u01c3\u01c8\7$\2\2\u01c4\u01c5\7^\2\2\u01c5\u01c8"+ - "\7^\2\2\u01c6\u01c8\n\16\2\2\u01c7\u01c2\3\2\2\2\u01c7\u01c4\3\2\2\2\u01c7"+ - "\u01c6\3\2\2\2\u01c8\u01cb\3\2\2\2\u01c9\u01ca\3\2\2\2\u01c9\u01c7\3\2"+ - "\2\2\u01ca\u01cc\3\2\2\2\u01cb\u01c9\3\2\2\2\u01cc\u01cd\7$\2\2\u01cd"+ - "\u01ce\bE\4\2\u01ce\u008b\3\2\2\2\u01cf\u01d0\7)\2\2\u01d0\u01d1\13\2"+ - "\2\2\u01d1\u01d2\7)\2\2\u01d2\u01d3\bF\5\2\u01d3\u008d\3\2\2\2\u01d4\u01d5"+ - "\7v\2\2\u01d5\u01d6\7t\2\2\u01d6\u01d7\7w\2\2\u01d7\u01d8\7g\2\2\u01d8"+ - "\u008f\3\2\2\2\u01d9\u01da\7h\2\2\u01da\u01db\7c\2\2\u01db\u01dc\7n\2"+ - "\2\u01dc\u01dd\7u\2\2\u01dd\u01de\7g\2\2\u01de\u0091\3\2\2\2\u01df\u01e0"+ - "\7p\2\2\u01e0\u01e1\7w\2\2\u01e1\u01e2\7n\2\2\u01e2\u01e3\7n\2\2\u01e3"+ - 
"\u0093\3\2\2\2\u01e4\u01e6\5\u0098L\2\u01e5\u01e7\5\u0096K\2\u01e6\u01e5"+ - "\3\2\2\2\u01e6\u01e7\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u01e9\6J\2\2\u01e9"+ - "\u01ea\bJ\6\2\u01ea\u0095\3\2\2\2\u01eb\u01ed\7\"\2\2\u01ec\u01eb\3\2"+ - "\2\2\u01ed\u01f0\3\2\2\2\u01ee\u01ec\3\2\2\2\u01ee\u01ef\3\2\2\2\u01ef"+ - "\u01f1\3\2\2\2\u01f0\u01ee\3\2\2\2\u01f1\u01f5\7>\2\2\u01f2\u01f4\7\""+ - "\2\2\u01f3\u01f2\3\2\2\2\u01f4\u01f7\3\2\2\2\u01f5\u01f3\3\2\2\2\u01f5"+ - "\u01f6\3\2\2\2\u01f6\u01f8\3\2\2\2\u01f7\u01f5\3\2\2\2\u01f8\u01fa\5\u0098"+ - "L\2\u01f9\u01fb\5\u0096K\2\u01fa\u01f9\3\2\2\2\u01fa\u01fb\3\2\2\2\u01fb"+ - "\u01ff\3\2\2\2\u01fc\u01fe\7\"\2\2\u01fd\u01fc\3\2\2\2\u01fe\u0201\3\2"+ - "\2\2\u01ff\u01fd\3\2\2\2\u01ff\u0200\3\2\2\2\u0200\u0215\3\2\2\2\u0201"+ - "\u01ff\3\2\2\2\u0202\u0206\5\26\13\2\u0203\u0205\7\"\2\2\u0204\u0203\3"+ - "\2\2\2\u0205\u0208\3\2\2\2\u0206\u0204\3\2\2\2\u0206\u0207\3\2\2\2\u0207"+ - "\u0209\3\2\2\2\u0208\u0206\3\2\2\2\u0209\u020b\5\u0098L\2\u020a\u020c"+ - "\5\u0096K\2\u020b\u020a\3\2\2\2\u020b\u020c\3\2\2\2\u020c\u0210\3\2\2"+ - "\2\u020d\u020f\7\"\2\2\u020e\u020d\3\2\2\2\u020f\u0212\3\2\2\2\u0210\u020e"+ - "\3\2\2\2\u0210\u0211\3\2\2\2\u0211\u0214\3\2\2\2\u0212\u0210\3\2\2\2\u0213"+ - "\u0202\3\2\2\2\u0214\u0217\3\2\2\2\u0215\u0213\3\2\2\2\u0215\u0216\3\2"+ - "\2\2\u0216\u0218\3\2\2\2\u0217\u0215\3\2\2\2\u0218\u0219\7@\2\2\u0219"+ - "\u0097\3\2\2\2\u021a\u021e\t\17\2\2\u021b\u021d\t\20\2\2\u021c\u021b\3"+ - "\2\2\2\u021d\u0220\3\2\2\2\u021e\u021c\3\2\2\2\u021e\u021f\3\2\2\2\u021f"+ - "\u0099\3\2\2\2\u0220\u021e\3\2\2\2\u0221\u022a\7\62\2\2\u0222\u0226\t"+ - "\b\2\2\u0223\u0225\t\t\2\2\u0224\u0223\3\2\2\2\u0225\u0228\3\2\2\2\u0226"+ - "\u0224\3\2\2\2\u0226\u0227\3\2\2\2\u0227\u022a\3\2\2\2\u0228\u0226\3\2"+ - "\2\2\u0229\u0221\3\2\2\2\u0229\u0222\3\2\2\2\u022a\u022b\3\2\2\2\u022b"+ - "\u022c\bM\7\2\u022c\u009b\3\2\2\2\u022d\u0231\t\17\2\2\u022e\u0230\t\20"+ - "\2\2\u022f\u022e\3\2\2\2\u0230\u0233\3\2\2\2\u0231\u022f\3\2\2\2\u0231"+ - "\u0232\3\2\2\2\u0232\u0234\3\2\2\2\u0233\u0231\3\2\2\2\u0234\u0235\bN"+ - "\7\2\u0235\u009d\3\2\2\2%\2\3\u00a1\u00ab\u00b5\u00ba\u0186\u0189\u0190"+ - "\u0193\u019a\u019d\u01a0\u01a7\u01aa\u01b0\u01b5\u01ba\u01bc\u01bf\u01c7"+ - "\u01c9\u01e6\u01ee\u01f5\u01fa\u01ff\u0206\u020b\u0210\u0215\u021e\u0226"+ - "\u0229\u0231\b\b\2\2\4\3\2\3E\2\3F\3\3J\4\4\2\2"; + "I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\3\2\6\2\u009e\n\2\r\2\16\2\u009f\3\2\3\2"+ + "\3\3\3\3\3\3\3\3\7\3\u00a8\n\3\f\3\16\3\u00ab\13\3\3\3\3\3\3\3\3\3\3\3"+ + "\7\3\u00b2\n\3\f\3\16\3\u00b5\13\3\3\3\3\3\5\3\u00b9\n\3\3\3\3\3\3\4\3"+ + "\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3"+ + "\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3"+ + "\17\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3"+ + "\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3"+ + "\24\3\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3"+ + "\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3"+ + "\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3"+ + "\"\3#\3#\3$\3$\3$\3%\3%\3&\3&\3&\3\'\3\'\3\'\3(\3(\3(\3(\3)\3)\3)\3*\3"+ + "*\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3.\3/\3/\3/\3\60\3\60\3\61\3\61\3\62\3"+ + "\62\3\62\3\63\3\63\3\63\3\64\3\64\3\65\3\65\3\65\3\66\3\66\3\66\3\67\3"+ + "\67\3\67\38\38\38\39\39\39\3:\3:\3:\3;\3;\3;\3<\3<\3<\3=\3=\3=\3=\3>\3"+ + ">\3>\3>\3?\3?\3?\3?\3?\3@\3@\6@\u017f\n@\r@\16@\u0180\3@\5@\u0184\n@\3"+ + 
"A\3A\3A\6A\u0189\nA\rA\16A\u018a\3A\5A\u018e\nA\3B\3B\3B\7B\u0193\nB\f"+ + "B\16B\u0196\13B\5B\u0198\nB\3B\5B\u019b\nB\3C\3C\3C\7C\u01a0\nC\fC\16"+ + "C\u01a3\13C\5C\u01a5\nC\3C\3C\7C\u01a9\nC\fC\16C\u01ac\13C\3C\3C\5C\u01b0"+ + "\nC\3C\6C\u01b3\nC\rC\16C\u01b4\5C\u01b7\nC\3C\5C\u01ba\nC\3D\3D\3D\3"+ + "D\3D\3D\7D\u01c2\nD\fD\16D\u01c5\13D\3D\3D\3D\3E\3E\3E\3E\3E\3F\3F\3F"+ + "\3F\3F\3G\3G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3I\3I\5I\u01e1\nI\3I\3I\3I\3J"+ + "\7J\u01e7\nJ\fJ\16J\u01ea\13J\3J\3J\7J\u01ee\nJ\fJ\16J\u01f1\13J\3J\3"+ + "J\5J\u01f5\nJ\3J\7J\u01f8\nJ\fJ\16J\u01fb\13J\3J\3J\7J\u01ff\nJ\fJ\16"+ + "J\u0202\13J\3J\3J\5J\u0206\nJ\3J\7J\u0209\nJ\fJ\16J\u020c\13J\7J\u020e"+ + "\nJ\fJ\16J\u0211\13J\3J\3J\3K\3K\7K\u0217\nK\fK\16K\u021a\13K\3L\3L\3"+ + "L\7L\u021f\nL\fL\16L\u0222\13L\5L\u0224\nL\3L\3L\3M\3M\7M\u022a\nM\fM"+ + "\16M\u022d\13M\3M\3M\5\u00a9\u00b3\u01c3\2N\4\3\6\4\b\5\n\6\f\7\16\b\20"+ + "\t\22\n\24\13\26\f\30\r\32\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27"+ + ".\30\60\31\62\32\64\33\66\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X"+ + "-Z.\\/^\60`\61b\62d\63f\64h\65j\66l\67n8p9r:t;v|?~@\u0080A\u0082"+ + "B\u0084C\u0086D\u0088E\u008aF\u008cG\u008eH\u0090I\u0092J\u0094\2\u0096"+ + "K\u0098L\u009aM\4\2\3\21\5\2\13\f\17\17\"\"\4\2\f\f\17\17\3\2\629\4\2"+ + "NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b\2FFHHNNffhhnn\4\2GGgg\4\2-"+ + "-//\4\2HHhh\4\2$$^^\5\2C\\aac|\6\2\62;C\\aac|\u024f\2\4\3\2\2\2\2\6\3"+ + "\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20\3\2\2\2\2"+ + "\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34\3"+ + "\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3"+ + "\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3\2\2\2\2\64"+ + "\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3"+ + "\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2"+ + "\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2X\3\2\2\2\2"+ + "Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3\2\2\2\2f\3"+ + "\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2"+ + "\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2"+ + "\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2\2\2\2\u0088"+ + "\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2\u0090\3\2\2"+ + "\2\2\u0092\3\2\2\2\2\u0096\3\2\2\2\3\u0098\3\2\2\2\3\u009a\3\2\2\2\4\u009d"+ + "\3\2\2\2\6\u00b8\3\2\2\2\b\u00bc\3\2\2\2\n\u00be\3\2\2\2\f\u00c0\3\2\2"+ + "\2\16\u00c2\3\2\2\2\20\u00c4\3\2\2\2\22\u00c6\3\2\2\2\24\u00c8\3\2\2\2"+ + "\26\u00cc\3\2\2\2\30\u00ce\3\2\2\2\32\u00d0\3\2\2\2\34\u00d3\3\2\2\2\36"+ + "\u00d8\3\2\2\2 \u00de\3\2\2\2\"\u00e1\3\2\2\2$\u00e5\3\2\2\2&\u00ee\3"+ + "\2\2\2(\u00f4\3\2\2\2*\u00fb\3\2\2\2,\u00ff\3\2\2\2.\u0103\3\2\2\2\60"+ + "\u0109\3\2\2\2\62\u010f\3\2\2\2\64\u0111\3\2\2\2\66\u0113\3\2\2\28\u0115"+ + "\3\2\2\2:\u0117\3\2\2\2<\u0119\3\2\2\2>\u011b\3\2\2\2@\u011d\3\2\2\2B"+ + "\u0120\3\2\2\2D\u0123\3\2\2\2F\u0127\3\2\2\2H\u0129\3\2\2\2J\u012c\3\2"+ + "\2\2L\u012e\3\2\2\2N\u0131\3\2\2\2P\u0134\3\2\2\2R\u0138\3\2\2\2T\u013b"+ + "\3\2\2\2V\u013f\3\2\2\2X\u0141\3\2\2\2Z\u0143\3\2\2\2\\\u0145\3\2\2\2"+ + "^\u0148\3\2\2\2`\u014b\3\2\2\2b\u014d\3\2\2\2d\u014f\3\2\2\2f\u0152\3"+ + "\2\2\2h\u0155\3\2\2\2j\u0157\3\2\2\2l\u015a\3\2\2\2n\u015d\3\2\2\2p\u0160"+ + "\3\2\2\2r\u0163\3\2\2\2t\u0166\3\2\2\2v\u0169\3\2\2\2x\u016c\3\2\2\2z"+ + "\u016f\3\2\2\2|\u0173\3\2\2\2~\u0177\3\2\2\2\u0080\u017c\3\2\2\2\u0082"+ + 
"\u0185\3\2\2\2\u0084\u0197\3\2\2\2\u0086\u01a4\3\2\2\2\u0088\u01bb\3\2"+ + "\2\2\u008a\u01c9\3\2\2\2\u008c\u01ce\3\2\2\2\u008e\u01d3\3\2\2\2\u0090"+ + "\u01d9\3\2\2\2\u0092\u01de\3\2\2\2\u0094\u01e8\3\2\2\2\u0096\u0214\3\2"+ + "\2\2\u0098\u0223\3\2\2\2\u009a\u0227\3\2\2\2\u009c\u009e\t\2\2\2\u009d"+ + "\u009c\3\2\2\2\u009e\u009f\3\2\2\2\u009f\u009d\3\2\2\2\u009f\u00a0\3\2"+ + "\2\2\u00a0\u00a1\3\2\2\2\u00a1\u00a2\b\2\2\2\u00a2\5\3\2\2\2\u00a3\u00a4"+ + "\7\61\2\2\u00a4\u00a5\7\61\2\2\u00a5\u00a9\3\2\2\2\u00a6\u00a8\13\2\2"+ + "\2\u00a7\u00a6\3\2\2\2\u00a8\u00ab\3\2\2\2\u00a9\u00aa\3\2\2\2\u00a9\u00a7"+ + "\3\2\2\2\u00aa\u00ac\3\2\2\2\u00ab\u00a9\3\2\2\2\u00ac\u00b9\t\3\2\2\u00ad"+ + "\u00ae\7\61\2\2\u00ae\u00af\7,\2\2\u00af\u00b3\3\2\2\2\u00b0\u00b2\13"+ + "\2\2\2\u00b1\u00b0\3\2\2\2\u00b2\u00b5\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b3"+ + "\u00b1\3\2\2\2\u00b4\u00b6\3\2\2\2\u00b5\u00b3\3\2\2\2\u00b6\u00b7\7,"+ + "\2\2\u00b7\u00b9\7\61\2\2\u00b8\u00a3\3\2\2\2\u00b8\u00ad\3\2\2\2\u00b9"+ + "\u00ba\3\2\2\2\u00ba\u00bb\b\3\2\2\u00bb\7\3\2\2\2\u00bc\u00bd\7}\2\2"+ + "\u00bd\t\3\2\2\2\u00be\u00bf\7\177\2\2\u00bf\13\3\2\2\2\u00c0\u00c1\7"+ + "]\2\2\u00c1\r\3\2\2\2\u00c2\u00c3\7_\2\2\u00c3\17\3\2\2\2\u00c4\u00c5"+ + "\7*\2\2\u00c5\21\3\2\2\2\u00c6\u00c7\7+\2\2\u00c7\23\3\2\2\2\u00c8\u00c9"+ + "\7\60\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00cb\b\n\3\2\u00cb\25\3\2\2\2\u00cc"+ + "\u00cd\7.\2\2\u00cd\27\3\2\2\2\u00ce\u00cf\7=\2\2\u00cf\31\3\2\2\2\u00d0"+ + "\u00d1\7k\2\2\u00d1\u00d2\7h\2\2\u00d2\33\3\2\2\2\u00d3\u00d4\7g\2\2\u00d4"+ + "\u00d5\7n\2\2\u00d5\u00d6\7u\2\2\u00d6\u00d7\7g\2\2\u00d7\35\3\2\2\2\u00d8"+ + "\u00d9\7y\2\2\u00d9\u00da\7j\2\2\u00da\u00db\7k\2\2\u00db\u00dc\7n\2\2"+ + "\u00dc\u00dd\7g\2\2\u00dd\37\3\2\2\2\u00de\u00df\7f\2\2\u00df\u00e0\7"+ + "q\2\2\u00e0!\3\2\2\2\u00e1\u00e2\7h\2\2\u00e2\u00e3\7q\2\2\u00e3\u00e4"+ + "\7t\2\2\u00e4#\3\2\2\2\u00e5\u00e6\7e\2\2\u00e6\u00e7\7q\2\2\u00e7\u00e8"+ + "\7p\2\2\u00e8\u00e9\7v\2\2\u00e9\u00ea\7k\2\2\u00ea\u00eb\7p\2\2\u00eb"+ + "\u00ec\7w\2\2\u00ec\u00ed\7g\2\2\u00ed%\3\2\2\2\u00ee\u00ef\7d\2\2\u00ef"+ + "\u00f0\7t\2\2\u00f0\u00f1\7g\2\2\u00f1\u00f2\7c\2\2\u00f2\u00f3\7m\2\2"+ + "\u00f3\'\3\2\2\2\u00f4\u00f5\7t\2\2\u00f5\u00f6\7g\2\2\u00f6\u00f7\7v"+ + "\2\2\u00f7\u00f8\7w\2\2\u00f8\u00f9\7t\2\2\u00f9\u00fa\7p\2\2\u00fa)\3"+ + "\2\2\2\u00fb\u00fc\7p\2\2\u00fc\u00fd\7g\2\2\u00fd\u00fe\7y\2\2\u00fe"+ + "+\3\2\2\2\u00ff\u0100\7v\2\2\u0100\u0101\7t\2\2\u0101\u0102\7{\2\2\u0102"+ + "-\3\2\2\2\u0103\u0104\7e\2\2\u0104\u0105\7c\2\2\u0105\u0106\7v\2\2\u0106"+ + "\u0107\7e\2\2\u0107\u0108\7j\2\2\u0108/\3\2\2\2\u0109\u010a\7v\2\2\u010a"+ + "\u010b\7j\2\2\u010b\u010c\7t\2\2\u010c\u010d\7q\2\2\u010d\u010e\7y\2\2"+ + "\u010e\61\3\2\2\2\u010f\u0110\7#\2\2\u0110\63\3\2\2\2\u0111\u0112\7\u0080"+ + "\2\2\u0112\65\3\2\2\2\u0113\u0114\7,\2\2\u0114\67\3\2\2\2\u0115\u0116"+ + "\7\61\2\2\u01169\3\2\2\2\u0117\u0118\7\'\2\2\u0118;\3\2\2\2\u0119\u011a"+ + "\7-\2\2\u011a=\3\2\2\2\u011b\u011c\7/\2\2\u011c?\3\2\2\2\u011d\u011e\7"+ + ">\2\2\u011e\u011f\7>\2\2\u011fA\3\2\2\2\u0120\u0121\7@\2\2\u0121\u0122"+ + "\7@\2\2\u0122C\3\2\2\2\u0123\u0124\7@\2\2\u0124\u0125\7@\2\2\u0125\u0126"+ + "\7@\2\2\u0126E\3\2\2\2\u0127\u0128\7>\2\2\u0128G\3\2\2\2\u0129\u012a\7"+ + ">\2\2\u012a\u012b\7?\2\2\u012bI\3\2\2\2\u012c\u012d\7@\2\2\u012dK\3\2"+ + "\2\2\u012e\u012f\7@\2\2\u012f\u0130\7?\2\2\u0130M\3\2\2\2\u0131\u0132"+ + "\7?\2\2\u0132\u0133\7?\2\2\u0133O\3\2\2\2\u0134\u0135\7?\2\2\u0135\u0136"+ + "\7?\2\2\u0136\u0137\7?\2\2\u0137Q\3\2\2\2\u0138\u0139\7#\2\2\u0139\u013a"+ + 
"\7?\2\2\u013aS\3\2\2\2\u013b\u013c\7#\2\2\u013c\u013d\7?\2\2\u013d\u013e"+ + "\7?\2\2\u013eU\3\2\2\2\u013f\u0140\7(\2\2\u0140W\3\2\2\2\u0141\u0142\7"+ + "`\2\2\u0142Y\3\2\2\2\u0143\u0144\7~\2\2\u0144[\3\2\2\2\u0145\u0146\7("+ + "\2\2\u0146\u0147\7(\2\2\u0147]\3\2\2\2\u0148\u0149\7~\2\2\u0149\u014a"+ + "\7~\2\2\u014a_\3\2\2\2\u014b\u014c\7A\2\2\u014ca\3\2\2\2\u014d\u014e\7"+ + "<\2\2\u014ec\3\2\2\2\u014f\u0150\7-\2\2\u0150\u0151\7-\2\2\u0151e\3\2"+ + "\2\2\u0152\u0153\7/\2\2\u0153\u0154\7/\2\2\u0154g\3\2\2\2\u0155\u0156"+ + "\7?\2\2\u0156i\3\2\2\2\u0157\u0158\7-\2\2\u0158\u0159\7?\2\2\u0159k\3"+ + "\2\2\2\u015a\u015b\7/\2\2\u015b\u015c\7?\2\2\u015cm\3\2\2\2\u015d\u015e"+ + "\7,\2\2\u015e\u015f\7?\2\2\u015fo\3\2\2\2\u0160\u0161\7\61\2\2\u0161\u0162"+ + "\7?\2\2\u0162q\3\2\2\2\u0163\u0164\7\'\2\2\u0164\u0165\7?\2\2\u0165s\3"+ + "\2\2\2\u0166\u0167\7(\2\2\u0167\u0168\7?\2\2\u0168u\3\2\2\2\u0169\u016a"+ + "\7`\2\2\u016a\u016b\7?\2\2\u016bw\3\2\2\2\u016c\u016d\7~\2\2\u016d\u016e"+ + "\7?\2\2\u016ey\3\2\2\2\u016f\u0170\7>\2\2\u0170\u0171\7>\2\2\u0171\u0172"+ + "\7?\2\2\u0172{\3\2\2\2\u0173\u0174\7@\2\2\u0174\u0175\7@\2\2\u0175\u0176"+ + "\7?\2\2\u0176}\3\2\2\2\u0177\u0178\7@\2\2\u0178\u0179\7@\2\2\u0179\u017a"+ + "\7@\2\2\u017a\u017b\7?\2\2\u017b\177\3\2\2\2\u017c\u017e\7\62\2\2\u017d"+ + "\u017f\t\4\2\2\u017e\u017d\3\2\2\2\u017f\u0180\3\2\2\2\u0180\u017e\3\2"+ + "\2\2\u0180\u0181\3\2\2\2\u0181\u0183\3\2\2\2\u0182\u0184\t\5\2\2\u0183"+ + "\u0182\3\2\2\2\u0183\u0184\3\2\2\2\u0184\u0081\3\2\2\2\u0185\u0186\7\62"+ + "\2\2\u0186\u0188\t\6\2\2\u0187\u0189\t\7\2\2\u0188\u0187\3\2\2\2\u0189"+ + "\u018a\3\2\2\2\u018a\u0188\3\2\2\2\u018a\u018b\3\2\2\2\u018b\u018d\3\2"+ + "\2\2\u018c\u018e\t\5\2\2\u018d\u018c\3\2\2\2\u018d\u018e\3\2\2\2\u018e"+ + "\u0083\3\2\2\2\u018f\u0198\7\62\2\2\u0190\u0194\t\b\2\2\u0191\u0193\t"+ + "\t\2\2\u0192\u0191\3\2\2\2\u0193\u0196\3\2\2\2\u0194\u0192\3\2\2\2\u0194"+ + "\u0195\3\2\2\2\u0195\u0198\3\2\2\2\u0196\u0194\3\2\2\2\u0197\u018f\3\2"+ + "\2\2\u0197\u0190\3\2\2\2\u0198\u019a\3\2\2\2\u0199\u019b\t\n\2\2\u019a"+ + "\u0199\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u0085\3\2\2\2\u019c\u01a5\7\62"+ + "\2\2\u019d\u01a1\t\b\2\2\u019e\u01a0\t\t\2\2\u019f\u019e\3\2\2\2\u01a0"+ + "\u01a3\3\2\2\2\u01a1\u019f\3\2\2\2\u01a1\u01a2\3\2\2\2\u01a2\u01a5\3\2"+ + "\2\2\u01a3\u01a1\3\2\2\2\u01a4\u019c\3\2\2\2\u01a4\u019d\3\2\2\2\u01a5"+ + "\u01a6\3\2\2\2\u01a6\u01aa\5\24\n\2\u01a7\u01a9\t\t\2\2\u01a8\u01a7\3"+ + "\2\2\2\u01a9\u01ac\3\2\2\2\u01aa\u01a8\3\2\2\2\u01aa\u01ab\3\2\2\2\u01ab"+ + "\u01b6\3\2\2\2\u01ac\u01aa\3\2\2\2\u01ad\u01af\t\13\2\2\u01ae\u01b0\t"+ + "\f\2\2\u01af\u01ae\3\2\2\2\u01af\u01b0\3\2\2\2\u01b0\u01b2\3\2\2\2\u01b1"+ + "\u01b3\t\t\2\2\u01b2\u01b1\3\2\2\2\u01b3\u01b4\3\2\2\2\u01b4\u01b2\3\2"+ + "\2\2\u01b4\u01b5\3\2\2\2\u01b5\u01b7\3\2\2\2\u01b6\u01ad\3\2\2\2\u01b6"+ + "\u01b7\3\2\2\2\u01b7\u01b9\3\2\2\2\u01b8\u01ba\t\r\2\2\u01b9\u01b8\3\2"+ + "\2\2\u01b9\u01ba\3\2\2\2\u01ba\u0087\3\2\2\2\u01bb\u01c3\7$\2\2\u01bc"+ + "\u01bd\7^\2\2\u01bd\u01c2\7$\2\2\u01be\u01bf\7^\2\2\u01bf\u01c2\7^\2\2"+ + "\u01c0\u01c2\n\16\2\2\u01c1\u01bc\3\2\2\2\u01c1\u01be\3\2\2\2\u01c1\u01c0"+ + "\3\2\2\2\u01c2\u01c5\3\2\2\2\u01c3\u01c4\3\2\2\2\u01c3\u01c1\3\2\2\2\u01c4"+ + "\u01c6\3\2\2\2\u01c5\u01c3\3\2\2\2\u01c6\u01c7\7$\2\2\u01c7\u01c8\bD\4"+ + "\2\u01c8\u0089\3\2\2\2\u01c9\u01ca\7)\2\2\u01ca\u01cb\13\2\2\2\u01cb\u01cc"+ + "\7)\2\2\u01cc\u01cd\bE\5\2\u01cd\u008b\3\2\2\2\u01ce\u01cf\7v\2\2\u01cf"+ + "\u01d0\7t\2\2\u01d0\u01d1\7w\2\2\u01d1\u01d2\7g\2\2\u01d2\u008d\3\2\2"+ + 
"\2\u01d3\u01d4\7h\2\2\u01d4\u01d5\7c\2\2\u01d5\u01d6\7n\2\2\u01d6\u01d7"+ + "\7u\2\2\u01d7\u01d8\7g\2\2\u01d8\u008f\3\2\2\2\u01d9\u01da\7p\2\2\u01da"+ + "\u01db\7w\2\2\u01db\u01dc\7n\2\2\u01dc\u01dd\7n\2\2\u01dd\u0091\3\2\2"+ + "\2\u01de\u01e0\5\u0096K\2\u01df\u01e1\5\u0094J\2\u01e0\u01df\3\2\2\2\u01e0"+ + "\u01e1\3\2\2\2\u01e1\u01e2\3\2\2\2\u01e2\u01e3\6I\2\2\u01e3\u01e4\bI\6"+ + "\2\u01e4\u0093\3\2\2\2\u01e5\u01e7\7\"\2\2\u01e6\u01e5\3\2\2\2\u01e7\u01ea"+ + "\3\2\2\2\u01e8\u01e6\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01eb\3\2\2\2\u01ea"+ + "\u01e8\3\2\2\2\u01eb\u01ef\7>\2\2\u01ec\u01ee\7\"\2\2\u01ed\u01ec\3\2"+ + "\2\2\u01ee\u01f1\3\2\2\2\u01ef\u01ed\3\2\2\2\u01ef\u01f0\3\2\2\2\u01f0"+ + "\u01f2\3\2\2\2\u01f1\u01ef\3\2\2\2\u01f2\u01f4\5\u0096K\2\u01f3\u01f5"+ + "\5\u0094J\2\u01f4\u01f3\3\2\2\2\u01f4\u01f5\3\2\2\2\u01f5\u01f9\3\2\2"+ + "\2\u01f6\u01f8\7\"\2\2\u01f7\u01f6\3\2\2\2\u01f8\u01fb\3\2\2\2\u01f9\u01f7"+ + "\3\2\2\2\u01f9\u01fa\3\2\2\2\u01fa\u020f\3\2\2\2\u01fb\u01f9\3\2\2\2\u01fc"+ + "\u0200\5\26\13\2\u01fd\u01ff\7\"\2\2\u01fe\u01fd\3\2\2\2\u01ff\u0202\3"+ + "\2\2\2\u0200\u01fe\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u0203\3\2\2\2\u0202"+ + "\u0200\3\2\2\2\u0203\u0205\5\u0096K\2\u0204\u0206\5\u0094J\2\u0205\u0204"+ + "\3\2\2\2\u0205\u0206\3\2\2\2\u0206\u020a\3\2\2\2\u0207\u0209\7\"\2\2\u0208"+ + "\u0207\3\2\2\2\u0209\u020c\3\2\2\2\u020a\u0208\3\2\2\2\u020a\u020b\3\2"+ + "\2\2\u020b\u020e\3\2\2\2\u020c\u020a\3\2\2\2\u020d\u01fc\3\2\2\2\u020e"+ + "\u0211\3\2\2\2\u020f\u020d\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0212\3\2"+ + "\2\2\u0211\u020f\3\2\2\2\u0212\u0213\7@\2\2\u0213\u0095\3\2\2\2\u0214"+ + "\u0218\t\17\2\2\u0215\u0217\t\20\2\2\u0216\u0215\3\2\2\2\u0217\u021a\3"+ + "\2\2\2\u0218\u0216\3\2\2\2\u0218\u0219\3\2\2\2\u0219\u0097\3\2\2\2\u021a"+ + "\u0218\3\2\2\2\u021b\u0224\7\62\2\2\u021c\u0220\t\b\2\2\u021d\u021f\t"+ + "\t\2\2\u021e\u021d\3\2\2\2\u021f\u0222\3\2\2\2\u0220\u021e\3\2\2\2\u0220"+ + "\u0221\3\2\2\2\u0221\u0224\3\2\2\2\u0222\u0220\3\2\2\2\u0223\u021b\3\2"+ + "\2\2\u0223\u021c\3\2\2\2\u0224\u0225\3\2\2\2\u0225\u0226\bL\7\2\u0226"+ + "\u0099\3\2\2\2\u0227\u022b\t\17\2\2\u0228\u022a\t\20\2\2\u0229\u0228\3"+ + "\2\2\2\u022a\u022d\3\2\2\2\u022b\u0229\3\2\2\2\u022b\u022c\3\2\2\2\u022c"+ + "\u022e\3\2\2\2\u022d\u022b\3\2\2\2\u022e\u022f\bM\7\2\u022f\u009b\3\2"+ + "\2\2%\2\3\u009f\u00a9\u00b3\u00b8\u0180\u0183\u018a\u018d\u0194\u0197"+ + "\u019a\u01a1\u01a4\u01aa\u01af\u01b4\u01b6\u01b9\u01c1\u01c3\u01e0\u01e8"+ + "\u01ef\u01f4\u01f9\u0200\u0205\u020a\u020f\u0218\u0220\u0223\u022b\b\b"+ + "\2\2\4\3\2\3D\2\3E\3\3I\4\4\2\2"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessParser.java b/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessParser.java index e7b331de661..53c6eb38446 100644 --- a/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessParser.java +++ b/plugins/lang-painless/src/main/java/org/elasticsearch/painless/PainlessParser.java @@ -36,9 +36,8 @@ class PainlessParser extends Parser { LTE=35, GT=36, GTE=37, EQ=38, EQR=39, NE=40, NER=41, BWAND=42, BWXOR=43, BWOR=44, BOOLAND=45, BOOLOR=46, COND=47, COLON=48, INCR=49, DECR=50, ASSIGN=51, AADD=52, ASUB=53, AMUL=54, ADIV=55, AREM=56, AAND=57, AXOR=58, AOR=59, - ALSH=60, ARSH=61, AUSH=62, ACAT=63, OCTAL=64, HEX=65, INTEGER=66, DECIMAL=67, - STRING=68, CHAR=69, TRUE=70, FALSE=71, NULL=72, TYPE=73, ID=74, EXTINTEGER=75, - EXTID=76; + ALSH=60, ARSH=61, 
AUSH=62, OCTAL=63, HEX=64, INTEGER=65, DECIMAL=66, STRING=67, + CHAR=68, TRUE=69, FALSE=70, NULL=71, TYPE=72, ID=73, EXTINTEGER=74, EXTID=75; public static final int RULE_source = 0, RULE_statement = 1, RULE_block = 2, RULE_empty = 3, RULE_emptyscope = 4, RULE_initializer = 5, RULE_afterthought = 6, RULE_declaration = 7, RULE_decltype = 8, @@ -60,8 +59,8 @@ class PainlessParser extends Parser { "'/'", "'%'", "'+'", "'-'", "'<<'", "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='", - "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", "'..='", null, - null, null, null, null, null, "'true'", "'false'", "'null'" + "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, + null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", @@ -70,9 +69,9 @@ class PainlessParser extends Parser { "MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", "BWXOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", - "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "ACAT", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", - "NULL", "TYPE", "ID", "EXTINTEGER", "EXTID" + "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", + "HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", "NULL", + "TYPE", "ID", "EXTINTEGER", "EXTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -162,7 +161,7 @@ class PainlessParser extends Parser { setState(53); _errHandler.sync(this); _la = _input.LA(1); - } while ( (((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0) ); + } while ( (((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0) ); setState(55); match(EOF); } @@ -469,7 +468,7 @@ class PainlessParser extends Parser { match(LP); setState(86); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | 
(1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { { setState(85); initializer(); @@ -480,7 +479,7 @@ class PainlessParser extends Parser { match(SEMICOLON); setState(90); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { { setState(89); expression(0); @@ -491,7 +490,7 @@ class PainlessParser extends Parser { match(SEMICOLON); setState(94); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { { setState(93); afterthought(); @@ -731,7 +730,7 @@ class PainlessParser extends Parser { setState(140); _errHandler.sync(this); _la = _input.LA(1); - } while ( (((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) 
| (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0) ); + } while ( (((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0) ); setState(142); match(RBRACK); } @@ -1540,7 +1539,7 @@ class PainlessParser extends Parser { _prevctx = _localctx; setState(208); _la = _input.LA(1); - if ( !(((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)))) != 0)) ) { + if ( !(((((_la - 63)) & ~0x3f) == 0 && ((1L << (_la - 63)) & ((1L << (OCTAL - 63)) | (1L << (HEX - 63)) | (1L << (INTEGER - 63)) | (1L << (DECIMAL - 63)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); @@ -2665,7 +2664,7 @@ class PainlessParser extends Parser { match(LP); setState(361); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR) | (1L << OCTAL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)) | (1L << (STRING - 64)) | (1L << (CHAR - 64)) | (1L << (TRUE - 64)) | (1L << (FALSE - 64)) | (1L << (NULL - 64)) | (1L << (TYPE - 64)) | (1L << (ID - 64)))) != 0)) { { setState(353); expression(0); @@ -2781,7 +2780,7 @@ class PainlessParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3N\u0172\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3M\u0172\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ @@ -2809,8 +2808,8 @@ class PainlessParser extends Parser { "\27\5\27\u015a\n\27\5\27\u015c\n\27\3\30\3\30\3\30\5\30\u0161\n\30\3\31"+ "\3\31\3\31\3\31\7\31\u0167\n\31\f\31\16\31\u016a\13\31\5\31\u016c\n\31"+ "\3\31\3\31\3\32\3\32\3\32\2\3\30\33\2\4\6\b\n\f\16\20\22\24\26\30\32\34"+ - "\36 \"$&(*,.\60\62\2\f\4\2\32\33\37 \3\2\65@\3\2BE\3\2\34\36\3\2\37 \3"+ - "\2!#\3\2$\'\3\2(+\3\2MN\3\2\63\64\u01b2\2\65\3\2\2\2\4\u0088\3\2\2\2\6"+ + "\36 \"$&(*,.\60\62\2\f\4\2\32\33\37 \3\2\65@\3\2AD\3\2\34\36\3\2\37 \3"+ + "\2!#\3\2$\'\3\2(+\3\2LM\3\2\63\64\u01b2\2\65\3\2\2\2\4\u0088\3\2\2\2\6"+ "\u0093\3\2\2\2\b\u0097\3\2\2\2\n\u0099\3\2\2\2\f\u009e\3\2\2\2\16\u00a0"+ 
"\3\2\2\2\20\u00a2\3\2\2\2\22\u00ab\3\2\2\2\24\u00b3\3\2\2\2\26\u00b8\3"+ "\2\2\2\30\u00de\3\2\2\2\32\u010f\3\2\2\2\34\u0111\3\2\2\2\36\u011f\3\2"+ @@ -2846,20 +2845,20 @@ class PainlessParser extends Parser { "\r\3\2\2\2\u00a0\u00a1\5\30\r\2\u00a1\17\3\2\2\2\u00a2\u00a3\5\22\n\2"+ "\u00a3\u00a8\5\24\13\2\u00a4\u00a5\7\f\2\2\u00a5\u00a7\5\24\13\2\u00a6"+ "\u00a4\3\2\2\2\u00a7\u00aa\3\2\2\2\u00a8\u00a6\3\2\2\2\u00a8\u00a9\3\2"+ - "\2\2\u00a9\21\3\2\2\2\u00aa\u00a8\3\2\2\2\u00ab\u00b0\7K\2\2\u00ac\u00ad"+ + "\2\2\u00a9\21\3\2\2\2\u00aa\u00a8\3\2\2\2\u00ab\u00b0\7J\2\2\u00ac\u00ad"+ "\7\7\2\2\u00ad\u00af\7\b\2\2\u00ae\u00ac\3\2\2\2\u00af\u00b2\3\2\2\2\u00b0"+ "\u00ae\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\23\3\2\2\2\u00b2\u00b0\3\2\2"+ - "\2\u00b3\u00b6\7L\2\2\u00b4\u00b5\7\65\2\2\u00b5\u00b7\5\30\r\2\u00b6"+ + "\2\u00b3\u00b6\7K\2\2\u00b4\u00b5\7\65\2\2\u00b5\u00b7\5\30\r\2\u00b6"+ "\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\25\3\2\2\2\u00b8\u00b9\7\30\2"+ - "\2\u00b9\u00ba\7\t\2\2\u00ba\u00bb\7K\2\2\u00bb\u00bc\7L\2\2\u00bc\u00bd"+ + "\2\u00b9\u00ba\7\t\2\2\u00ba\u00bb\7J\2\2\u00bb\u00bc\7K\2\2\u00bc\u00bd"+ "\3\2\2\2\u00bd\u00c0\7\n\2\2\u00be\u00c1\5\6\4\2\u00bf\u00c1\5\n\6\2\u00c0"+ "\u00be\3\2\2\2\u00c0\u00bf\3\2\2\2\u00c1\27\3\2\2\2\u00c2\u00c3\b\r\1"+ "\2\u00c3\u00c4\t\2\2\2\u00c4\u00df\5\30\r\20\u00c5\u00c6\7\t\2\2\u00c6"+ "\u00c7\5\22\n\2\u00c7\u00c8\7\n\2\2\u00c8\u00c9\5\30\r\17\u00c9\u00df"+ "\3\2\2\2\u00ca\u00cb\5\32\16\2\u00cb\u00cc\t\3\2\2\u00cc\u00cd\5\30\r"+ "\3\u00cd\u00df\3\2\2\2\u00ce\u00cf\7\t\2\2\u00cf\u00d0\5\30\r\2\u00d0"+ - "\u00d1\7\n\2\2\u00d1\u00df\3\2\2\2\u00d2\u00df\t\4\2\2\u00d3\u00df\7G"+ - "\2\2\u00d4\u00df\7H\2\2\u00d5\u00df\7I\2\2\u00d6\u00df\7J\2\2\u00d7\u00d8"+ + "\u00d1\7\n\2\2\u00d1\u00df\3\2\2\2\u00d2\u00df\t\4\2\2\u00d3\u00df\7F"+ + "\2\2\u00d4\u00df\7G\2\2\u00d5\u00df\7H\2\2\u00d6\u00df\7I\2\2\u00d7\u00d8"+ "\5\32\16\2\u00d8\u00d9\5\62\32\2\u00d9\u00df\3\2\2\2\u00da\u00db\5\62"+ "\32\2\u00db\u00dc\5\32\16\2\u00dc\u00df\3\2\2\2\u00dd\u00df\5\32\16\2"+ "\u00de\u00c2\3\2\2\2\u00de\u00c5\3\2\2\2\u00de\u00ca\3\2\2\2\u00de\u00ce"+ @@ -2899,21 +2898,21 @@ class PainlessParser extends Parser { "\u0130\5 \21\2\u012f\u012d\3\2\2\2\u012f\u012e\3\2\2\2\u012f\u0130\3\2"+ "\2\2\u0130!\3\2\2\2\u0131\u0134\7\13\2\2\u0132\u0135\5&\24\2\u0133\u0135"+ "\5*\26\2\u0134\u0132\3\2\2\2\u0134\u0133\3\2\2\2\u0135#\3\2\2\2\u0136"+ - "\u0137\7K\2\2\u0137\u0138\5\"\22\2\u0138%\3\2\2\2\u0139\u013a\7N\2\2\u013a"+ + "\u0137\7J\2\2\u0137\u0138\5\"\22\2\u0138%\3\2\2\2\u0139\u013a\7M\2\2\u013a"+ "\u013d\5\60\31\2\u013b\u013e\5\"\22\2\u013c\u013e\5 \21\2\u013d\u013b"+ "\3\2\2\2\u013d\u013c\3\2\2\2\u013d\u013e\3\2\2\2\u013e\'\3\2\2\2\u013f"+ - "\u0142\7L\2\2\u0140\u0143\5\"\22\2\u0141\u0143\5 \21\2\u0142\u0140\3\2"+ + "\u0142\7K\2\2\u0140\u0143\5\"\22\2\u0141\u0143\5 \21\2\u0142\u0140\3\2"+ "\2\2\u0142\u0141\3\2\2\2\u0142\u0143\3\2\2\2\u0143)\3\2\2\2\u0144\u0147"+ "\t\n\2\2\u0145\u0148\5\"\22\2\u0146\u0148\5 \21\2\u0147\u0145\3\2\2\2"+ "\u0147\u0146\3\2\2\2\u0147\u0148\3\2\2\2\u0148+\3\2\2\2\u0149\u014a\7"+ - "\26\2\2\u014a\u015b\7K\2\2\u014b\u014e\5\60\31\2\u014c\u014f\5\"\22\2"+ + "\26\2\2\u014a\u015b\7J\2\2\u014b\u014e\5\60\31\2\u014c\u014f\5\"\22\2"+ "\u014d\u014f\5 \21\2\u014e\u014c\3\2\2\2\u014e\u014d\3\2\2\2\u014e\u014f"+ "\3\2\2\2\u014f\u015c\3\2\2\2\u0150\u0151\7\7\2\2\u0151\u0152\5\30\r\2"+ "\u0152\u0153\7\b\2\2\u0153\u0155\3\2\2\2\u0154\u0150\3\2\2\2\u0155\u0156"+ "\3\2\2\2\u0156\u0154\3\2\2\2\u0156\u0157\3\2\2\2\u0157\u0159\3\2\2\2\u0158"+ 
"\u015a\5\"\22\2\u0159\u0158\3\2\2\2\u0159\u015a\3\2\2\2\u015a\u015c\3"+ "\2\2\2\u015b\u014b\3\2\2\2\u015b\u0154\3\2\2\2\u015c-\3\2\2\2\u015d\u0160"+ - "\7F\2\2\u015e\u0161\5\"\22\2\u015f\u0161\5 \21\2\u0160\u015e\3\2\2\2\u0160"+ + "\7E\2\2\u015e\u0161\5\"\22\2\u015f\u0161\5 \21\2\u0160\u015e\3\2\2\2\u0160"+ "\u015f\3\2\2\2\u0160\u0161\3\2\2\2\u0161/\3\2\2\2\u0162\u016b\7\t\2\2"+ "\u0163\u0168\5\30\r\2\u0164\u0165\7\f\2\2\u0165\u0167\5\30\r\2\u0166\u0164"+ "\3\2\2\2\u0167\u016a\3\2\2\2\u0168\u0166\3\2\2\2\u0168\u0169\3\2\2\2\u0169"+ diff --git a/plugins/lang-painless/src/test/java/org/elasticsearch/painless/NoSemiColonTest.java b/plugins/lang-painless/src/test/java/org/elasticsearch/painless/NoSemiColonTests.java similarity index 87% rename from plugins/lang-painless/src/test/java/org/elasticsearch/painless/NoSemiColonTest.java rename to plugins/lang-painless/src/test/java/org/elasticsearch/painless/NoSemiColonTests.java index b4807bb5b4c..e9c399e1eff 100644 --- a/plugins/lang-painless/src/test/java/org/elasticsearch/painless/NoSemiColonTest.java +++ b/plugins/lang-painless/src/test/java/org/elasticsearch/painless/NoSemiColonTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless; import java.util.HashMap; import java.util.Map; -public class NoSemiColonTest extends ScriptTestCase { +public class NoSemiColonTests extends ScriptTestCase { public void testIfStatement() { assertEquals(1, exec("int x = 5 if (x == 5) return 1 return 0")); @@ -46,7 +46,7 @@ public class NoSemiColonTest extends ScriptTestCase { public void testWhileStatement() { - assertEquals("aaaaaa", exec("String c = \"a\" int x while (x < 5) { c ..= \"a\" ++x } return c")); + assertEquals("aaaaaa", exec("String c = \"a\" int x while (x < 5) { ++x c += \"a\" } return c")); Object value = exec( " byte[][] b = new byte[5][5] \n" + @@ -75,24 +75,24 @@ public class NoSemiColonTest extends ScriptTestCase { } public void testDoWhileStatement() { - assertEquals("aaaaaa", exec("String c = \"a\" int x do { c ..= \"a\" ++x } while (x < 5) return c")); + assertEquals("aaaaaa", exec("String c = \"a\" int x do { c += \"a\"; ++x } while (x < 5) return c")); Object value = exec( - " long[][] l = new long[5][5] \n" + - " long x = 0, y \n" + - " \n" + - " do { \n" + - " y = 0 \n" + - " \n" + - " do { \n" + - " l[(int)x][(int)y] = x*y \n" + - " ++y \n" + - " } while (y < 5) \n" + - " \n" + - " ++x \n" + - " } while (x < 5) \n" + - " \n" + - " return l \n"); + " long[][] l = new long[5][5] \n" + + " long x = 0, y \n" + + " \n" + + " do { \n" + + " y = 0 \n" + + " \n" + + " do { \n" + + " l[(int)x][(int)y] = x*y; \n" + + " ++y \n" + + " } while (y < 5) \n" + + " \n" + + " ++x \n" + + " } while (x < 5) \n" + + " \n" + + " return l \n"); long[][] l = (long[][])value; @@ -104,7 +104,7 @@ public class NoSemiColonTest extends ScriptTestCase { } public void testForStatement() { - assertEquals("aaaaaa", exec("String c = \"a\" for (int x = 0; x < 5; ++x) c ..= \"a\" return c")); + assertEquals("aaaaaa", exec("String c = \"a\" for (int x = 0; x < 5; ++x) c += \"a\" return c")); Object value = exec( " int[][] i = new int[5][5] \n" + diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java index 217d48a8565..03c6e65047a 100644 --- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java +++ 
b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java @@ -36,7 +36,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.ParseContext; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; @@ -119,7 +118,7 @@ public class StandaloneRunner extends CliTool { terminal.println("## Extracted text"); terminal.println("--------------------- BEGIN -----------------------"); - terminal.println("%s", doc.get("file.content")); + terminal.println(doc.get("file.content")); terminal.println("---------------------- END ------------------------"); terminal.println("## Metadata"); printMetadataContent(doc, AttachmentMapper.FieldNames.AUTHOR); @@ -135,18 +134,14 @@ public class StandaloneRunner extends CliTool { } private void printMetadataContent(ParseContext.Document doc, String field) { - terminal.println("- %s: %s", field, doc.get(docMapper.mappers().getMapper("file." + field).fieldType().name())); + terminal.println("- " + field + ":" + doc.get(docMapper.mappers().getMapper("file." + field).fieldType().name())); } public static byte[] copyToBytes(Path path) throws IOException { - try (InputStream is = Files.newInputStream(path)) { - if (is == null) { - throw new FileNotFoundException("Resource [" + path + "] not found in classpath"); - } - try (BytesStreamOutput out = new BytesStreamOutput()) { - copy(is, out); - return out.bytes().toBytes(); - } + try (InputStream is = Files.newInputStream(path); + BytesStreamOutput out = new BytesStreamOutput()) { + copy(is, out); + return out.bytes().toBytes(); } } @@ -177,7 +172,7 @@ public class StandaloneRunner extends CliTool { } - public static void main(String[] args) { + public static void main(String[] args) throws Exception { StandaloneRunner pluginManager = new StandaloneRunner(); pluginManager.execute(args); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index cf97008249f..85baf00b909 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -23,6 +23,7 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; @@ -32,7 +33,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryName; import org.elasticsearch.repositories.RepositorySettings; -import org.elasticsearch.repositories.azure.AzureRepository.Defaults; import java.io.InputStream; import java.io.OutputStream; @@ -40,7 +40,7 @@ import java.net.URISyntaxException; import java.util.Locale; import java.util.Map; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getRepositorySettings; +import static 
org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getValue; import static org.elasticsearch.repositories.azure.AzureRepository.Repository; public class AzureBlobStore extends AbstractComponent implements BlobStore { @@ -57,17 +57,15 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { AzureStorageService client) throws URISyntaxException, StorageException { super(settings); this.client = client.start(); - this.container = getRepositorySettings(repositorySettings, Repository.CONTAINER, Storage.CONTAINER, Defaults.CONTAINER); + this.container = getValue(repositorySettings, Repository.CONTAINER_SETTING, Storage.CONTAINER_SETTING); this.repositoryName = name.getName(); + this.accountName = getValue(repositorySettings, Repository.ACCOUNT_SETTING, Storage.ACCOUNT_SETTING); - // NOTE: null account means to use the first one specified in config - this.accountName = getRepositorySettings(repositorySettings, Repository.ACCOUNT, Storage.ACCOUNT, null); - - String modeStr = getRepositorySettings(repositorySettings, Repository.LOCATION_MODE, Storage.LOCATION_MODE, null); - if (modeStr == null) { - this.locMode = LocationMode.PRIMARY_ONLY; - } else { + String modeStr = getValue(repositorySettings, Repository.LOCATION_MODE_SETTING, Storage.LOCATION_MODE_SETTING); + if (Strings.hasLength(modeStr)) { this.locMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT)); + } else { + this.locMode = LocationMode.PRIMARY_ONLY; + } } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index c154f78eeb5..657c292db31 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -23,11 +23,15 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import java.io.InputStream; import java.io.OutputStream; import java.net.URISyntaxException; import java.util.Map; +import java.util.function.Function; /** * Azure Storage Service interface */ @@ -37,15 +41,13 @@ public interface AzureStorageService { final class Storage { public static final String PREFIX = "cloud.azure.storage."; - - public static final String TIMEOUT = "cloud.azure.storage.timeout"; - - public static final String ACCOUNT = "repositories.azure.account"; - public static final String LOCATION_MODE = "repositories.azure.location_mode"; - public static final String CONTAINER = "repositories.azure.container"; - public static final String BASE_PATH = "repositories.azure.base_path"; - public static final String CHUNK_SIZE = "repositories.azure.chunk_size"; - public static final String COMPRESS = "repositories.azure.compress"; + public static final Setting<TimeValue> TIMEOUT_SETTING = Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(5), false, Setting.Scope.CLUSTER); + public static final Setting<String> ACCOUNT_SETTING = Setting.simpleString("repositories.azure.account", false, Setting.Scope.CLUSTER); + public static final Setting<String> CONTAINER_SETTING = Setting.simpleString("repositories.azure.container", false, Setting.Scope.CLUSTER); + 
public static final Setting BASE_PATH_SETTING = Setting.simpleString("repositories.azure.base_path", false, Setting.Scope.CLUSTER); + public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("repositories.azure.location_mode", false, Setting.Scope.CLUSTER); + public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.azure.chunk_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + public static final Setting COMPRESS_SETTING = Setting.boolSetting("repositories.azure.compress", false, false, Setting.Scope.CLUSTER); } boolean doesContainerExist(String account, LocationMode mode, String container); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index 3cecd810a95..74ba008c8ec 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -28,6 +28,7 @@ import com.microsoft.azure.storage.blob.CloudBlobContainer; import com.microsoft.azure.storage.blob.CloudBlockBlob; import com.microsoft.azure.storage.blob.ListBlobItem; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; @@ -41,7 +42,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; -import java.util.Hashtable; +import java.util.HashMap; import java.util.Map; public class AzureStorageServiceImpl extends AbstractLifecycleComponent @@ -60,7 +61,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent(); + this.clients = new HashMap<>(); } void createClient(AzureStorageSettings azureStorageSettings) { @@ -94,13 +95,13 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent secondaryStorage = new HashMap<>(); - TimeValue globalTimeout = settings.getAsTime(Storage.TIMEOUT, TimeValue.timeValueMinutes(5)); + TimeValue globalTimeout = Storage.TIMEOUT_SETTING.get(settings); Settings storageSettings = settings.getByPrefix(Storage.PREFIX); if (storageSettings != null) { @@ -124,27 +124,23 @@ public class AzureStorageSettings { return Tuple.tuple(primaryStorage, secondaryStorage); } - public static String getRepositorySettings(RepositorySettings repositorySettings, - String repositorySettingName, - String repositoriesSettingName, - String defaultValue) { - return repositorySettings.settings().get(repositorySettingName, - repositorySettings.globalSettings().get(repositoriesSettingName, defaultValue)); + public static T getValue(RepositorySettings repositorySettings, + Setting repositorySetting, + Setting repositoriesSetting) { + if (repositorySetting.exists(repositorySettings.settings())) { + return repositorySetting.get(repositorySettings.settings()); + } else { + return repositoriesSetting.get(repositorySettings.globalSettings()); + } } - public static ByteSizeValue getRepositorySettingsAsBytesSize(RepositorySettings repositorySettings, - String repositorySettingName, - String repositoriesSettingName, - ByteSizeValue defaultValue) { - return repositorySettings.settings().getAsBytesSize(repositorySettingName, - 
repositorySettings.globalSettings().getAsBytesSize(repositoriesSettingName, defaultValue)); - } - - public static Boolean getRepositorySettingsAsBoolean(RepositorySettings repositorySettings, - String repositorySettingName, - String repositoriesSettingName, - Boolean defaultValue) { - return repositorySettings.settings().getAsBoolean(repositorySettingName, - repositorySettings.globalSettings().getAsBoolean(repositoriesSettingName, defaultValue)); + public static Setting getEffectiveSetting(RepositorySettings repositorySettings, + Setting repositorySetting, + Setting repositoriesSetting) { + if (repositorySetting.exists(repositorySettings.settings())) { + return repositorySetting; + } else { + return repositoriesSetting; + } } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java index 2c4e7957af3..76ac68bd436 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java @@ -33,6 +33,6 @@ public class AzureStorageSettingsFilter extends AbstractComponent { // Cloud storage API settings needed to be hidden settingsFilter.addFilter(Storage.PREFIX + "*.account"); settingsFilter.addFilter(Storage.PREFIX + "*.key"); - settingsFilter.addFilter(Storage.ACCOUNT); + settingsFilter.addFilter(Storage.ACCOUNT_SETTING.getKey()); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index a3abf9b4adf..f2773bccbbd 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -42,10 +44,10 @@ import java.io.IOException; import java.net.URISyntaxException; import java.util.List; import java.util.Locale; +import java.util.function.Function; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getRepositorySettings; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getRepositorySettingsAsBoolean; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getRepositorySettingsAsBytesSize; +import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getEffectiveSetting; +import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getValue; /** * Azure file system implementation of the BlobStoreRepository @@ -60,31 +62,23 @@ import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getRepo */ public class AzureRepository extends BlobStoreRepository { + private static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); + public final static String TYPE = 
"azure"; - static public final class Defaults { - public static final String CONTAINER = "elasticsearch-snapshots"; - public static final ByteSizeValue CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); - public static final Boolean COMPRESS = false; - } - - - static public final class Repository { - public static final String ACCOUNT = "account"; - public static final String LOCATION_MODE = "location_mode"; - public static final String CONTAINER = "container"; - public static final String CHUNK_SIZE = "chunk_size"; - public static final String COMPRESS = "compress"; - public static final String BASE_PATH = "base_path"; + public static final class Repository { + public static final Setting ACCOUNT_SETTING = Setting.simpleString("account", false, Setting.Scope.CLUSTER); + public static final Setting CONTAINER_SETTING = new Setting<>("container", "elasticsearch-snapshots", Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", false, Setting.Scope.CLUSTER); + public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("location_mode", false, Setting.Scope.CLUSTER); + public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, false, Setting.Scope.CLUSTER); + public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); } private final AzureBlobStore blobStore; - private final BlobPath basePath; - - private ByteSizeValue chunkSize; - - private boolean compress; + private final ByteSizeValue chunkSize; + private final boolean compress; private final boolean readonly; @Inject @@ -93,30 +87,27 @@ public class AzureRepository extends BlobStoreRepository { AzureBlobStore azureBlobStore) throws IOException, URISyntaxException, StorageException { super(name.getName(), repositorySettings, indexShardRepository); - String container = getRepositorySettings(repositorySettings, Repository.CONTAINER, Storage.CONTAINER, Defaults.CONTAINER); + String container = getValue(repositorySettings, Repository.CONTAINER_SETTING, Storage.CONTAINER_SETTING); this.blobStore = azureBlobStore; - this.chunkSize = getRepositorySettingsAsBytesSize(repositorySettings, Repository.CHUNK_SIZE, Storage.CHUNK_SIZE, Defaults.CHUNK_SIZE); - - if (this.chunkSize.getMb() > 64) { - logger.warn("azure repository does not support yet size > 64mb. 
Fall back to 64mb."); - this.chunkSize = new ByteSizeValue(64, ByteSizeUnit.MB); + ByteSizeValue configuredChunkSize = getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Storage.CHUNK_SIZE_SETTING); + if (configuredChunkSize.getMb() > MAX_CHUNK_SIZE.getMb()) { + Setting setting = getEffectiveSetting(repositorySettings, Repository.CHUNK_SIZE_SETTING, Storage.CHUNK_SIZE_SETTING); + throw new SettingsException("[" + setting.getKey() + "] must not exceed [" + MAX_CHUNK_SIZE + "] but is set to [" + configuredChunkSize + "]."); + } else { + this.chunkSize = configuredChunkSize; } - this.compress = getRepositorySettingsAsBoolean(repositorySettings, Repository.COMPRESS, Storage.COMPRESS, Defaults.COMPRESS); - String modeStr = getRepositorySettings(repositorySettings, Repository.LOCATION_MODE, Storage.LOCATION_MODE, null); - if (modeStr != null) { + this.compress = getValue(repositorySettings, Repository.COMPRESS_SETTING, Storage.COMPRESS_SETTING); + String modeStr = getValue(repositorySettings, Repository.LOCATION_MODE_SETTING, Storage.LOCATION_MODE_SETTING); + if (Strings.hasLength(modeStr)) { LocationMode locationMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT)); - if (locationMode == LocationMode.SECONDARY_ONLY) { - readonly = true; - } else { - readonly = false; - } + readonly = locationMode == LocationMode.SECONDARY_ONLY; } else { readonly = false; } - String basePath = getRepositorySettings(repositorySettings, Repository.BASE_PATH, Storage.BASE_PATH, null); + String basePath = getValue(repositorySettings, Repository.BASE_PATH_SETTING, Storage.BASE_PATH_SETTING); if (Strings.hasLength(basePath)) { // Remove starting / if any diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTestCase.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTestCase.java index 73aa7e3921c..d8bca609ce3 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTestCase.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTestCase.java @@ -80,7 +80,7 @@ public abstract class AbstractAzureRepositoryServiceTestCase extends AbstractAzu @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.settingsBuilder() - .put(Storage.CONTAINER, "snapshots"); + .put(Storage.CONTAINER_SETTING.getKey(), "snapshots"); return builder.build(); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTest.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java similarity index 93% rename from plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTest.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java index 93683d9d014..5fc4937ea92 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTest.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java @@ -28,7 +28,7 @@ import java.net.URI; import static org.hamcrest.Matchers.is; -public class AzureStorageServiceTest extends ESTestCase { +public class AzureStorageServiceTests extends ESTestCase { final static Settings settings = Settings.builder() 
.put("cloud.azure.storage.azure1.account", "myaccount1") .put("cloud.azure.storage.azure1.key", "mykey1") @@ -120,24 +120,24 @@ public class AzureStorageServiceTest extends ESTestCase { public void testGetSelectedClientGlobalTimeout() { Settings timeoutSettings = Settings.builder() .put(settings) - .put("cloud.azure.storage.timeout", "10s") + .put(AzureStorageService.Storage.TIMEOUT_SETTING.getKey(), "10s") .build(); AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); azureStorageService.doStart(); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(10 * 1000)); + assertThat(client1.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(10 * 1000)); CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); - assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); + assertThat(client3.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(30 * 1000)); } public void testGetSelectedClientDefaultTimeout() { AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(settings); azureStorageService.doStart(); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(5 * 60 * 1000)); + assertThat(client1.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(5 * 60 * 1000)); CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); - assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); + assertThat(client3.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(30 * 1000)); } /** diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTest.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java similarity index 97% rename from plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTest.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java index eaaf9c224d8..6e36b27cebe 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTest.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java @@ -31,7 +31,7 @@ import java.io.IOException; import static org.hamcrest.Matchers.contains; -public class AzureStorageSettingsFilterTest extends ESTestCase { +public class AzureStorageSettingsFilterTests extends ESTestCase { final static Settings settings = Settings.builder() .put("cloud.azure.storage.azure1.account", "myaccount1") .put("cloud.azure.storage.azure1.key", "mykey1") diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTest.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java similarity index 98% rename from plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTest.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java index 
aec8506ca6d..5347be09da0 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTest.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java @@ -31,7 +31,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class AzureSettingsParserTest extends LuceneTestCase { +public class AzureSettingsParserTests extends LuceneTestCase { public void testParseTwoSettingsExplicitDefault() { Settings settings = Settings.builder() diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 1818a5e6252..7f6f3106fc3 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -106,9 +106,9 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("azure").setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName()) - .put(Repository.BASE_PATH, getRepositoryPath()) - .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) + .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) + .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -197,9 +197,9 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa logger.info("creating Azure repository with path [{}]", getRepositoryPath()); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) .setType("azure").setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName()) - .put(Repository.BASE_PATH, getRepositoryPath()) - .put(Repository.BASE_PATH, randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) + .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) + .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -237,16 +237,16 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa logger.info("--> creating azure repository with path [{}]", getRepositoryPath());
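For reference on the three-argument put used for chunk_size throughout these tests: it stores a byte-size value under the setting's key, and getAsBytesSize reads it back, applying the default only when the key is absent. A small sketch of that round trip (the key name here is illustrative):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    class ByteSizePutSketch {
        static void demo() {
            Settings settings = Settings.settingsBuilder()
                .put("chunk_size", 5, ByteSizeUnit.KB) // stored as "5kb"
                .build();
            ByteSizeValue chunk = settings.getAsBytesSize("chunk_size", new ByteSizeValue(64, ByteSizeUnit.MB));
            assert chunk.getKb() == 5; // the 64mb default applies only when the key is absent
        }
    }

PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") .setType("azure").setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName().concat("-1")) - .put(Repository.BASE_PATH, getRepositoryPath()) - .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-1")) + .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) + .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), 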
ByteSizeUnit.BYTES) ).get(); assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true)); PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") .setType("azure").setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName().concat("-2")) - .put(Repository.BASE_PATH, getRepositoryPath()) - .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-2")) + .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) + .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); assertThat(putRepositoryResponse2.isAcknowledged(), equalTo(true)); @@ -316,7 +316,7 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa logger.info("--> creating azure repository without any path"); PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") .setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName()) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -337,8 +337,8 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa logger.info("--> creating azure repository path [{}]", getRepositoryPath()); putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") .setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName()) - .put(Repository.BASE_PATH, getRepositoryPath()) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) + .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -363,7 +363,7 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa logger.info("--> creating azure repository without any path"); PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") .setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName()) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -414,9 +414,9 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa try { PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") .setType("azure").setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, container) - .put(Repository.BASE_PATH, getRepositoryPath()) - .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) + .put(Repository.CONTAINER_SETTING.getKey(), container) + .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) + .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); client().admin().cluster().prepareDeleteRepository("test-repo").get(); try { @@ -444,9 +444,9 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("azure").setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, getContainerName()) - .put(Repository.BASE_PATH, 
getRepositoryPath()) - .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) + .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) + .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) + .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -492,7 +492,7 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyTestCa try { client.preparePutRepository("test-repo").setType("azure") .setSettings(Settings.settingsBuilder() - .put(Repository.CONTAINER, container) + .put(Repository.CONTAINER_SETTING.getKey(), container) ).get(); fail("we should get a RepositoryVerificationException"); } catch (RepositoryVerificationException e) { diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java index 78085b201a3..92c9df1845d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.cli.CliTool.ExitStatus; import org.elasticsearch.common.cli.CliToolTestCase; +import org.elasticsearch.common.cli.UserError; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.monitor.jvm.JvmInfo; import org.hamcrest.Matcher; @@ -167,7 +168,7 @@ public class BootstrapCliParserTests extends CliToolTestCase { assertThatTerminalOutput(containsString("Parameter [network.host] needs value")); } - public void testParsingErrors() { + public void testParsingErrors() throws Exception { BootstrapCLIParser parser = new BootstrapCLIParser(terminal); // unknown params @@ -229,12 +230,10 @@ public class BootstrapCliParserTests extends CliToolTestCase { public void testThatHelpfulErrorMessageIsGivenWhenParametersAreOutOfOrder() throws Exception { BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - try { - parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"}); - fail("expected IllegalArgumentException for out-of-order parameters"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("must be before any parameters starting with --")); - } + UserError e = expectThrows(UserError.class, () -> { + parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"}); + }); + assertThat(e.getMessage(), containsString("must be before any parameters starting with --")); } private void registerProperties(String ... 
systemProperties) { diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java index 92ab945dfca..d5b494d9849 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java @@ -71,9 +71,9 @@ public class CliToolTests extends CliToolTestCase { final AtomicReference<Boolean> executed = new AtomicReference<>(false); final NamedCommand cmd = new NamedCommand("cmd", terminal) { @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) { + public CliTool.ExitStatus execute(Settings settings, Environment env) throws UserError { executed.set(true); - return CliTool.ExitStatus.USAGE; + throw new UserError(CliTool.ExitStatus.USAGE, "bad usage"); } }; SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); @@ -82,39 +82,7 @@ public class CliToolTests extends CliToolTestCase { assertCommandHasBeenExecuted(executed); } - public void testIOError() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference<Boolean> executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.set(true); - throw new IOException("io error"); - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(); - assertStatus(status, CliTool.ExitStatus.IO_ERROR); - assertCommandHasBeenExecuted(executed); - } - - public void testCodeError() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference<Boolean> executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.set(true); - throw new Exception("random error"); - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(); - assertStatus(status, CliTool.ExitStatus.CODE_ERROR); - assertCommandHasBeenExecuted(executed); - } - - public void testMultiCommand() { + public void testMultiCommand() throws Exception { Terminal terminal = new MockTerminal(); int count = randomIntBetween(2, 7); List<AtomicReference<Boolean>> executed = new ArrayList<>(count); @@ -141,7 +109,7 @@ public class CliToolTests extends CliToolTestCase { } } - public void testMultiCommandUnknownCommand() { + public void testMultiCommandUnknownCommand() throws Exception { Terminal terminal = new MockTerminal(); int count = randomIntBetween(2, 7); List<AtomicReference<Boolean>> executed = new ArrayList<>(count); @@ -184,7 +152,7 @@ public class CliToolTests extends CliToolTestCase { assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help"))); } - public void testMultiCommandToolHelp() { + public void testMultiCommandToolHelp() throws Exception { CaptureOutputTerminal terminal = new CaptureOutputTerminal(); NamedCommand[] cmds = new NamedCommand[2]; cmds[0] = new NamedCommand("cmd0", terminal) { @@ -206,7 +174,7 @@ public class CliToolTests extends CliToolTestCase { assertThat(terminal.getTerminalOutput(), hasItem(containsString("tool help"))); }
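The conversions in this file all follow one pattern: expectThrows (inherited from LuceneTestCase) runs a lambda, asserts the exception type, and returns the exception so its message can be checked, replacing the older try/fail/catch idiom. A condensed sketch, reusing names from the surrounding tests:

    // expectThrows returns the caught exception for further assertions.
    UserError e = expectThrows(UserError.class, () -> {
        new SingleCmdTool("tool", terminal, cmd).execute(); // command that throws UserError("bad usage")
    });
    assertThat(e.getMessage(), containsString("bad usage"));

- public void testMultiCommandCmdHelp() { + public void testMultiCommandCmdHelp() throws Exception { CaptureOutputTerminal terminal = new CaptureOutputTerminal(); NamedCommand[] cmds = new NamedCommand[2]; cmds[0] = new 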
NamedCommand("cmd0", terminal) { @@ -228,31 +196,19 @@ public class CliToolTests extends CliToolTestCase { assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help"))); } - public void testThatThrowExceptionCanBeLogged() throws Exception { + public void testNonUserErrorPropagates() throws Exception { CaptureOutputTerminal terminal = new CaptureOutputTerminal(); NamedCommand cmd = new NamedCommand("cmd", terminal) { @Override public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - throw new ElasticsearchException("error message"); + throw new IOException("error message"); } }; SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - assertStatus(tool.execute(), CliTool.ExitStatus.CODE_ERROR); - assertThat(terminal.getTerminalOutput(), hasSize(1)); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("error message"))); - - // set env... and log stack trace - try { - System.setProperty(Terminal.DEBUG_SYSTEM_PROPERTY, "true"); - terminal = new CaptureOutputTerminal(); - assertStatus(new SingleCmdTool("tool", terminal, cmd).execute(), CliTool.ExitStatus.CODE_ERROR); - assertThat(terminal.getTerminalOutput(), hasSize(2)); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("error message"))); - // This class must be part of the stack strace - assertThat(terminal.getTerminalOutput(), hasItem(containsString(getClass().getName()))); - } finally { - System.clearProperty(Terminal.DEBUG_SYSTEM_PROPERTY); - } + IOException e = expectThrows(IOException.class, () -> { + tool.execute(); + }); + assertEquals("error message", e.getMessage()); } public void testMultipleLaunch() throws Exception { diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java new file mode 100644 index 00000000000..727728f84ab --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -0,0 +1,463 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.plugins; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.DirectoryStream; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFileAttributes; +import java.nio.file.attribute.PosixFilePermission; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Version; +import org.elasticsearch.common.cli.CliTool; +import org.elasticsearch.common.cli.CliToolTestCase; +import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.junit.BeforeClass; + +@LuceneTestCase.SuppressFileSystems("*") +public class InstallPluginCommandTests extends ESTestCase { + + private static boolean isPosix; + + @BeforeClass + public static void checkPosix() throws IOException { + isPosix = Files.getFileAttributeView(createTempFile(), PosixFileAttributeView.class) != null; + } + + /** Stores the posix attributes for a path and resets them on close. */ + static class PosixPermissionsResetter implements AutoCloseable { + private final PosixFileAttributeView attributeView; + final Set<PosixFilePermission> permissions; + public PosixPermissionsResetter(Path path) throws IOException { + attributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); + assertNotNull(attributeView); + permissions = attributeView.readAttributes().permissions(); + } + @Override + public void close() throws IOException { + attributeView.setPermissions(permissions); + } + public void setPermissions(Set<PosixFilePermission> newPermissions) throws IOException { + attributeView.setPermissions(newPermissions); + } + } + + /** Creates a test environment with bin, config and plugins directories. */ + static Environment createEnv() throws IOException { + Path home = createTempDir(); + Files.createDirectories(home.resolve("bin")); + Files.createFile(home.resolve("bin").resolve("elasticsearch")); + Files.createDirectories(home.resolve("config")); + Files.createFile(home.resolve("config").resolve("elasticsearch.yml")); + Files.createDirectories(home.resolve("plugins")); + Settings settings = Settings.builder() + .put("path.home", home) + .build(); + return new Environment(settings); + } + + /** creates a fake jar file with empty class files */ + static void writeJar(Path jar, String... 
classes) throws IOException { + try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(jar))) { + for (String clazz : classes) { + stream.putNextEntry(new ZipEntry(clazz + ".class")); // no package names, just support simple classes + } + } + } + + static String writeZip(Path structure) throws IOException { + Path zip = createTempDir().resolve(structure.getFileName() + ".zip"); + try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) { + Files.walkFileTree(structure, new SimpleFileVisitor<Path>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + stream.putNextEntry(new ZipEntry(structure.relativize(file).toString())); + Files.copy(file, stream); + return FileVisitResult.CONTINUE; + } + }); + } + return zip.toUri().toURL().toString(); + } + + /** creates a plugin .zip and returns the url for testing */ + static String createPlugin(String name, Path structure) throws IOException { + PluginTestUtil.writeProperties(structure, + "description", "fake desc", + "name", name, + "version", "1.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "FakePlugin"); + writeJar(structure.resolve("plugin.jar"), "FakePlugin"); + return writeZip(structure); + } + + static CliToolTestCase.CaptureOutputTerminal installPlugin(String pluginUrl, Environment env) throws Exception { + CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(Terminal.Verbosity.NORMAL); + CliTool.ExitStatus status = new InstallPluginCommand(terminal, pluginUrl, true).execute(env.settings(), env); + assertEquals(CliTool.ExitStatus.OK, status); + return terminal; + } + + void assertPlugin(String name, Path original, Environment env) throws IOException { + Path got = env.pluginsFile().resolve(name); + assertTrue("dir " + name + " exists", Files.exists(got)); + assertTrue("jar was copied", Files.exists(got.resolve("plugin.jar"))); + assertFalse("bin was not copied", Files.exists(got.resolve("bin"))); + assertFalse("config was not copied", Files.exists(got.resolve("config"))); + if (Files.exists(original.resolve("bin"))) { + Path binDir = env.binFile().resolve(name); + assertTrue("bin dir exists", Files.exists(binDir)); + assertTrue("bin is a dir", Files.isDirectory(binDir)); + PosixFileAttributes binAttributes = null; + if (isPosix) { + binAttributes = Files.readAttributes(env.binFile(), PosixFileAttributes.class); + } + try (DirectoryStream<Path> stream = Files.newDirectoryStream(binDir)) { + for (Path file : stream) { + assertFalse("not a dir", Files.isDirectory(file)); + if (isPosix) { + PosixFileAttributes attributes = Files.readAttributes(file, PosixFileAttributes.class); + Set<PosixFilePermission> expectedPermissions = new HashSet<>(binAttributes.permissions()); + expectedPermissions.add(PosixFilePermission.OWNER_EXECUTE); + expectedPermissions.add(PosixFilePermission.GROUP_EXECUTE); + expectedPermissions.add(PosixFilePermission.OTHERS_EXECUTE); + assertEquals(expectedPermissions, attributes.permissions()); + } + } + } + } + if (Files.exists(original.resolve("config"))) { + Path configDir = env.configFile().resolve(name); + assertTrue("config dir exists", Files.exists(configDir)); + assertTrue("config is a dir", Files.isDirectory(configDir)); + try (DirectoryStream<Path> stream = Files.newDirectoryStream(configDir)) { + for (Path file : stream) { + assertFalse("not a dir", Files.isDirectory(file)); + } + } + } + assertInstallCleaned(env); + } + + void 
assertInstallCleaned(Environment env) throws IOException { + try (DirectoryStream<Path> stream = Files.newDirectoryStream(env.pluginsFile())) { + for (Path file : stream) { + if (file.getFileName().toString().startsWith(".installing")) { + fail("Installation dir still exists, " + file); + } + } + } + } + + public void testSomethingWorks() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + String pluginZip = createPlugin("fake", pluginDir); + installPlugin(pluginZip, env); + assertPlugin("fake", pluginDir, env); + } + + public void testSpaceInUrl() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + String pluginZip = createPlugin("fake", pluginDir); + Path pluginZipWithSpaces = createTempFile("foo bar", ".zip"); + Files.copy(new URL(pluginZip).openStream(), pluginZipWithSpaces, StandardCopyOption.REPLACE_EXISTING); + installPlugin(pluginZipWithSpaces.toUri().toURL().toString(), env); + assertPlugin("fake", pluginDir, env); + } + + public void testMalformedUrlNotMaven() throws Exception { + // has two colons, so it appears similar to maven coordinates + MalformedURLException e = expectThrows(MalformedURLException.class, () -> { + installPlugin("://host:1234", createEnv()); + }); + assertTrue(e.getMessage(), e.getMessage().contains("no protocol")); + } + + public void testPluginsDirMissing() throws Exception { + Environment env = createEnv(); + Files.delete(env.pluginsFile()); + Path pluginDir = createTempDir(); + String pluginZip = createPlugin("fake", pluginDir); + installPlugin(pluginZip, env); + assertPlugin("fake", pluginDir, env); + } + + public void testPluginsDirReadOnly() throws Exception { + assumeTrue("posix filesystem", isPosix); + Environment env = createEnv(); + try (PosixPermissionsResetter pluginsAttrs = new PosixPermissionsResetter(env.pluginsFile())) { + pluginsAttrs.setPermissions(new HashSet<>()); + String pluginZip = createPlugin("fake", createTempDir()); + IOException e = expectThrows(IOException.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains(env.pluginsFile().toString())); + } + assertInstallCleaned(env); + } + + public void testBuiltinModule() throws Exception { + Environment env = createEnv(); + String pluginZip = createPlugin("lang-groovy", createTempDir()); + UserError e = expectThrows(UserError.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("is a system module")); + assertInstallCleaned(env); + } + + public void testJarHell() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + writeJar(pluginDir.resolve("other.jar"), "FakePlugin"); + String pluginZip = createPlugin("fake", pluginDir); // adds plugin.jar with FakePlugin + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("jar hell")); + assertInstallCleaned(env); + } + + public void testIsolatedPlugins() throws Exception { + Environment env = createEnv(); + // these both share the same FakePlugin class + Path pluginDir1 = createTempDir(); + String pluginZip1 = createPlugin("fake1", pluginDir1); + installPlugin(pluginZip1, env); + Path pluginDir2 = createTempDir(); + String pluginZip2 = createPlugin("fake2", pluginDir2); + installPlugin(pluginZip2, env); + assertPlugin("fake1", pluginDir1, env); + assertPlugin("fake2", pluginDir2, env); + }
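assertInstallCleaned above encodes the installer's staging convention: a plugin is unpacked into a temporary directory whose name starts with .installing and is only renamed to its final name once verification succeeds, so a failed install leaves at most a staging directory this check can detect. A hedged sketch of that convention (the exact staging name and the atomic move are assumptions, not the InstallPluginCommand source):

    Path staging = env.pluginsFile().resolve(".installing-fake"); // assumed name; only the ".installing" prefix is asserted above
    // ... extract the zip into `staging` and run verification ...
    Files.move(staging, env.pluginsFile().resolve("fake"), StandardCopyOption.ATOMIC_MOVE); // publish only after verification

+ + public void 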
testPurgatoryJarHell() throws Exception { + Environment env = createEnv(); + Path pluginDir1 = createTempDir(); + PluginTestUtil.writeProperties(pluginDir1, + "description", "fake desc", + "name", "fake1", + "version", "1.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "FakePlugin", + "isolated", "false"); + writeJar(pluginDir1.resolve("plugin.jar"), "FakePlugin"); + String pluginZip1 = writeZip(pluginDir1); + installPlugin(pluginZip1, env); + + Path pluginDir2 = createTempDir(); + PluginTestUtil.writeProperties(pluginDir2, + "description", "fake desc", + "name", "fake2", + "version", "1.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "FakePlugin", + "isolated", "false"); + writeJar(pluginDir2.resolve("plugin.jar"), "FakePlugin"); + String pluginZip2 = writeZip(pluginDir2); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + installPlugin(pluginZip2, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("jar hell")); + assertInstallCleaned(env); + } + + public void testExistingPlugin() throws Exception { + Environment env = createEnv(); + String pluginZip = createPlugin("fake", createTempDir()); + installPlugin(pluginZip, env); + UserError e = expectThrows(UserError.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("already exists")); + assertInstallCleaned(env); + } + + public void testBin() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path binDir = pluginDir.resolve("bin"); + Files.createDirectory(binDir); + Files.createFile(binDir.resolve("somescript")); + String pluginZip = createPlugin("fake", pluginDir); + installPlugin(pluginZip, env); + assertPlugin("fake", pluginDir, env); + } + + public void testBinNotDir() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path binDir = pluginDir.resolve("bin"); + Files.createFile(binDir); + String pluginZip = createPlugin("fake", pluginDir); + UserError e = expectThrows(UserError.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); + assertInstallCleaned(env); + } + + public void testBinContainsDir() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path dirInBinDir = pluginDir.resolve("bin").resolve("foo"); + Files.createDirectories(dirInBinDir); + Files.createFile(dirInBinDir.resolve("somescript")); + String pluginZip = createPlugin("fake", pluginDir); + UserError e = expectThrows(UserError.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in bin dir for plugin")); + assertInstallCleaned(env); + } + + public void testBinConflict() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path binDir = pluginDir.resolve("bin"); + Files.createDirectory(binDir); + Files.createFile(binDir.resolve("somescript")); + String pluginZip = createPlugin("elasticsearch", pluginDir); + FileAlreadyExistsException e = expectThrows(FileAlreadyExistsException.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains(env.binFile().resolve("elasticsearch").toString())); + assertInstallCleaned(env); + } + + public void 
testBinPermissions() throws Exception { + assumeTrue("posix filesystem", isPosix); + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path binDir = pluginDir.resolve("bin"); + Files.createDirectory(binDir); + Files.createFile(binDir.resolve("somescript")); + String pluginZip = createPlugin("fake", pluginDir); + try (PosixPermissionsResetter binAttrs = new PosixPermissionsResetter(env.binFile())) { + Set<PosixFilePermission> perms = new HashSet<>(binAttrs.permissions); + // make sure at least one execute perm is missing, so we know we forced it during installation + perms.remove(PosixFilePermission.GROUP_EXECUTE); + binAttrs.setPermissions(perms); + installPlugin(pluginZip, env); + assertPlugin("fake", pluginDir, env); + } + } + + public void testConfig() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path configDir = pluginDir.resolve("config"); + Files.createDirectory(configDir); + Files.createFile(configDir.resolve("custom.yaml")); + String pluginZip = createPlugin("fake", pluginDir); + installPlugin(pluginZip, env); + assertPlugin("fake", pluginDir, env); + } + + public void testExistingConfig() throws Exception { + Environment env = createEnv(); + Path envConfigDir = env.configFile().resolve("fake"); + Files.createDirectories(envConfigDir); + Files.write(envConfigDir.resolve("custom.yaml"), "existing config".getBytes(StandardCharsets.UTF_8)); + Path pluginDir = createTempDir(); + Path configDir = pluginDir.resolve("config"); + Files.createDirectory(configDir); + Files.write(configDir.resolve("custom.yaml"), "new config".getBytes(StandardCharsets.UTF_8)); + Files.createFile(configDir.resolve("other.yaml")); + String pluginZip = createPlugin("fake", pluginDir); + installPlugin(pluginZip, env); + assertPlugin("fake", pluginDir, env); + List<String> configLines = Files.readAllLines(envConfigDir.resolve("custom.yaml"), StandardCharsets.UTF_8); + assertEquals(1, configLines.size()); + assertEquals("existing config", configLines.get(0)); + assertTrue(Files.exists(envConfigDir.resolve("other.yaml"))); + } + + public void testConfigNotDir() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path configDir = pluginDir.resolve("config"); + Files.createFile(configDir); + String pluginZip = createPlugin("fake", pluginDir); + UserError e = expectThrows(UserError.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); + assertInstallCleaned(env); + } + + public void testConfigContainsDir() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path dirInConfigDir = pluginDir.resolve("config").resolve("foo"); + Files.createDirectories(dirInConfigDir); + Files.createFile(dirInConfigDir.resolve("myconfig.yml")); + String pluginZip = createPlugin("fake", pluginDir); + UserError e = expectThrows(UserError.class, () -> { + installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in config dir for plugin")); + assertInstallCleaned(env); + }
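testExistingConfig above pins down the config-merge contract: files the user already has under the plugin's config directory are preserved, and only files that do not exist yet are copied in. A minimal copy-if-absent sketch of that contract (an illustration, not the command's code):

    try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginConfigDir)) {
        for (Path src : stream) {
            Path dest = envConfigDir.resolve(src.getFileName().toString());
            if (Files.exists(dest) == false) { // existing user config wins; only new files are added
                Files.copy(src, dest);
            }
        }
    }

+ + public void testConfigConflict() throws Exception { + Environment env = createEnv(); + Path pluginDir = createTempDir(); + Path configDir = pluginDir.resolve("config"); + Files.createDirectory(configDir); + Files.createFile(configDir.resolve("myconfig.yml")); + String pluginZip = createPlugin("elasticsearch.yml", pluginDir); + FileAlreadyExistsException e = expectThrows(FileAlreadyExistsException.class, () -> { 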
installPlugin(pluginZip, env); + }); + assertTrue(e.getMessage(), e.getMessage().contains(env.configFile().resolve("elasticsearch.yml").toString())); + assertInstallCleaned(env); + } + + // TODO: test batch flag? + // TODO: test checksum (need maven/official below) + // TODO: test maven, official, and staging install...need tests with fixtures... +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java new file mode 100644 index 00000000000..c68e207c0c3 --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.common.cli.CliTool; +import org.elasticsearch.common.cli.CliToolTestCase; +import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; + +@LuceneTestCase.SuppressFileSystems("*") +public class ListPluginsCommandTests extends ESTestCase { + + Environment createEnv() throws IOException { + Path home = createTempDir(); + Files.createDirectories(home.resolve("plugins")); + Settings settings = Settings.builder() + .put("path.home", home) + .build(); + return new Environment(settings); + } + + static CliToolTestCase.CaptureOutputTerminal listPlugins(Environment env) throws Exception { + CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(Terminal.Verbosity.NORMAL); + CliTool.ExitStatus status = new ListPluginsCommand(terminal).execute(env.settings(), env); + assertEquals(CliTool.ExitStatus.OK, status); + return terminal; + } + + public void testPluginsDirMissing() throws Exception { + Environment env = createEnv(); + Files.delete(env.pluginsFile()); + IOException e = expectThrows(IOException.class, () -> { + listPlugins(env); + }); + assertTrue(e.getMessage(), e.getMessage().contains("Plugins directory missing")); + } + + public void testNoPlugins() throws Exception { + CliToolTestCase.CaptureOutputTerminal terminal = listPlugins(createEnv()); + List<String> lines = terminal.getTerminalOutput(); + assertEquals(0, lines.size()); + }
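The contract these tests observe is one terminal line per directory under plugins/. A sketch of the directory scan behind that output (illustrative; ListPluginsCommand's actual code may differ):

    try (DirectoryStream<Path> stream = Files.newDirectoryStream(env.pluginsFile())) {
        for (Path plugin : stream) {
            terminal.println(plugin.getFileName().toString()); // one line per installed plugin
        }
    }

+ + public void testOnePlugin() throws Exception { + Environment env = createEnv(); + Files.createDirectory(env.pluginsFile().resolve("fake")); + CliToolTestCase.CaptureOutputTerminal terminal = listPlugins(env); + List<String> lines = terminal.getTerminalOutput(); + assertEquals(1, 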
lines.size()); + assertTrue(lines.get(0).contains("fake")); + } + + public void testTwoPlugins() throws Exception { + Environment env = createEnv(); + Files.createDirectory(env.pluginsFile().resolve("fake1")); + Files.createDirectory(env.pluginsFile().resolve("fake2")); + CliToolTestCase.CaptureOutputTerminal terminal = listPlugins(env); + List<String> lines = terminal.getTerminalOutput(); + assertEquals(2, lines.size()); + Collections.sort(lines); + assertTrue(lines.get(0).contains("fake1")); + assertTrue(lines.get(1).contains("fake2")); + } +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerPermissionTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerPermissionTests.java deleted file mode 100644 index 5e70cf71923..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerPermissionTests.java +++ /dev/null @@ -1,377 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugins; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.Version; -import org.elasticsearch.common.cli.CliToolTestCase.CaptureOutputTerminal; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.net.URL; -import java.nio.charset.Charset; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFileAttributes; -import java.nio.file.attribute.PosixFilePermission; -import java.nio.file.attribute.PosixFilePermissions; -import java.util.HashSet; -import java.util.Set; -import java.util.zip.ZipEntry; -import java.util.zip.ZipOutputStream; - -import static java.nio.file.attribute.PosixFilePermission.GROUP_EXECUTE; -import static java.nio.file.attribute.PosixFilePermission.OTHERS_EXECUTE; -import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDirectoryExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; - -// 
there are some lucene file systems that seem to cause problems (deleted files, dirs instead of files) -@LuceneTestCase.SuppressFileSystems("*") -public class PluginManagerPermissionTests extends ESTestCase { - - private String pluginName = "my-plugin"; - private CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - private Environment environment; - private boolean supportsPermissions; - - @Before - public void setup() { - Path tempDir = createTempDir(); - Settings.Builder settingsBuilder = settingsBuilder().put(Environment.PATH_HOME_SETTING.getKey(), tempDir); - if (randomBoolean()) { - settingsBuilder.put(Environment.PATH_PLUGINS_SETTING.getKey(), createTempDir()); - } - - if (randomBoolean()) { - settingsBuilder.put(Environment.PATH_CONF_SETTING.getKey(), createTempDir()); - } - - environment = new Environment(settingsBuilder.build()); - - supportsPermissions = tempDir.getFileSystem().supportedFileAttributeViews().contains("posix"); - } - - public void testThatUnaccessibleBinDirectoryAbortsPluginInstallation() throws Exception { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - - URL pluginUrl = createPlugin(true, randomBoolean()); - - Path binPath = environment.binFile().resolve(pluginName); - Files.createDirectories(binPath); - try { - Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("---------")); - - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - pluginManager.downloadAndExtract(pluginName, terminal, true); - - fail("Expected IOException but did not happen"); - } catch (IOException e) { - assertFileNotExists(environment.pluginsFile().resolve(pluginName)); - assertFileNotExists(environment.configFile().resolve(pluginName)); - // exists, because of our weird permissions above - assertDirectoryExists(environment.binFile().resolve(pluginName)); - - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Error copying bin directory "))); - } finally { - Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("rwxrwxrwx")); - } - } - - public void testThatUnaccessiblePluginConfigDirectoryAbortsPluginInstallation() throws Exception { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - - URL pluginUrl = createPlugin(randomBoolean(), true); - - Path path = environment.configFile().resolve(pluginName); - Files.createDirectories(path); - Files.createFile(path.resolve("my-custom-config.yaml")); - Path binPath = environment.binFile().resolve(pluginName); - Files.createDirectories(binPath); - - try { - Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("---------")); - Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("---------")); - - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - pluginManager.downloadAndExtract(pluginName, terminal, true); - - fail("Expected IOException but did not happen, terminal output was " + terminal.getTerminalOutput()); - } catch (IOException e) { - assertFileNotExists(environment.pluginsFile().resolve(pluginName)); - assertFileNotExists(environment.binFile().resolve(pluginName)); - // exists, because of our weird permissions above - assertDirectoryExists(environment.configFile().resolve(pluginName)); - - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Error copying 
config directory "))); - } finally { - Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("rwxrwxrwx")); - Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("rwxrwxrwx")); - } - } - - // config/bin are not writable, but the plugin does not need to put anything into it - public void testThatPluginWithoutBinAndConfigWorksEvenIfPermissionsAreWrong() throws Exception { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - - URL pluginUrl = createPlugin(false, false); - Path path = environment.configFile().resolve(pluginName); - Files.createDirectories(path); - Files.createFile(path.resolve("my-custom-config.yaml")); - Path binPath = environment.binFile().resolve(pluginName); - Files.createDirectories(binPath); - - try { - Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("---------")); - Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("---------")); - Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("---------")); - - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - pluginManager.downloadAndExtract(pluginName, terminal, true); - } finally { - Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("rwxrwxrwx")); - Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("rwxrwxrwx")); - Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("rwxrwxrwx")); - } - - } - - // plugins directory no accessible, should leave no other left over directories - public void testThatNonWritablePluginsDirectoryLeavesNoLeftOver() throws Exception { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - - URL pluginUrl = createPlugin(true, true); - Files.createDirectories(environment.pluginsFile()); - - try { - Files.setPosixFilePermissions(environment.pluginsFile(), PosixFilePermissions.fromString("---------")); - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - try { - pluginManager.downloadAndExtract(pluginName, terminal, true); - fail("Expected IOException due to read-only plugins/ directory"); - } catch (IOException e) { - assertFileNotExists(environment.binFile().resolve(pluginName)); - assertFileNotExists(environment.configFile().resolve(pluginName)); - - Files.setPosixFilePermissions(environment.pluginsFile(), PosixFilePermissions.fromString("rwxrwxrwx")); - assertDirectoryExists(environment.pluginsFile()); - assertFileNotExists(environment.pluginsFile().resolve(pluginName)); - } - } finally { - Files.setPosixFilePermissions(environment.pluginsFile(), PosixFilePermissions.fromString("rwxrwxrwx")); - } - } - - public void testThatUnwriteableBackupFilesInConfigurationDirectoryAreReplaced() throws Exception { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - - boolean pluginContainsExecutables = randomBoolean(); - URL pluginUrl = createPlugin(pluginContainsExecutables, true); - Files.createDirectories(environment.configFile().resolve(pluginName)); - - Path configFile = environment.configFile().resolve(pluginName).resolve("my-custom-config.yaml"); - Files.createFile(configFile); - Path backupConfigFile = environment.configFile().resolve(pluginName).resolve("my-custom-config.yaml.new"); 
- Files.createFile(backupConfigFile); - Files.write(backupConfigFile, "foo".getBytes(Charset.forName("UTF-8"))); - - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - try { - Files.setPosixFilePermissions(backupConfigFile, PosixFilePermissions.fromString("---------")); - - pluginManager.downloadAndExtract(pluginName, terminal, true); - - if (pluginContainsExecutables) { - assertDirectoryExists(environment.binFile().resolve(pluginName)); - } - assertDirectoryExists(environment.pluginsFile().resolve(pluginName)); - assertDirectoryExists(environment.configFile().resolve(pluginName)); - - assertFileExists(backupConfigFile); - Files.setPosixFilePermissions(backupConfigFile, PosixFilePermissions.fromString("rw-rw-rw-")); - String content = new String(Files.readAllBytes(backupConfigFile), Charset.forName("UTF-8")); - assertThat(content, is(not("foo"))); - } finally { - Files.setPosixFilePermissions(backupConfigFile, PosixFilePermissions.fromString("rw-rw-rw-")); - } - } - - public void testThatConfigDirectoryBeingAFileAbortsInstallationAndDoesNotAccidentallyDeleteThisFile() throws Exception { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - - Files.createDirectories(environment.configFile()); - Files.createFile(environment.configFile().resolve(pluginName)); - URL pluginUrl = createPlugin(randomBoolean(), true); - - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - - try { - pluginManager.downloadAndExtract(pluginName, terminal, true); - fail("Expected plugin installation to fail, but didnt"); - } catch (IOException e) { - assertFileExists(environment.configFile().resolve(pluginName)); - assertFileNotExists(environment.binFile().resolve(pluginName)); - assertFileNotExists(environment.pluginsFile().resolve(pluginName)); - } - } - - public void testThatBinDirectoryBeingAFileAbortsInstallationAndDoesNotAccidentallyDeleteThisFile() throws Exception { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - - Files.createDirectories(environment.binFile()); - Files.createFile(environment.binFile().resolve(pluginName)); - URL pluginUrl = createPlugin(true, randomBoolean()); - - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - - try { - pluginManager.downloadAndExtract(pluginName, terminal, true); - fail("Expected plugin installation to fail, but didnt"); - } catch (IOException e) { - assertFileExists(environment.binFile().resolve(pluginName)); - assertFileNotExists(environment.configFile().resolve(pluginName)); - assertFileNotExists(environment.pluginsFile().resolve(pluginName)); - } - } - - public void testConfigDirectoryOwnerGroupAndPermissions() throws IOException { - assumeTrue("File system does not support permissions, skipping", supportsPermissions); - URL pluginUrl = createPlugin(false, true); - PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); - pluginManager.downloadAndExtract(pluginName, terminal, true); - PosixFileAttributes parentFileAttributes = Files.getFileAttributeView(environment.configFile(), PosixFileAttributeView.class).readAttributes(); - Path configPath = environment.configFile().resolve(pluginName); - PosixFileAttributes pluginConfigDirAttributes = 
Files.getFileAttributeView(configPath, PosixFileAttributeView.class).readAttributes();
-        assertThat(pluginConfigDirAttributes.owner(), equalTo(parentFileAttributes.owner()));
-        assertThat(pluginConfigDirAttributes.group(), equalTo(parentFileAttributes.group()));
-        assertThat(pluginConfigDirAttributes.permissions(), equalTo(parentFileAttributes.permissions()));
-        Path configFile = configPath.resolve("my-custom-config.yaml");
-        PosixFileAttributes pluginConfigFileAttributes = Files.getFileAttributeView(configFile, PosixFileAttributeView.class).readAttributes();
-        assertThat(pluginConfigFileAttributes.owner(), equalTo(parentFileAttributes.owner()));
-        assertThat(pluginConfigFileAttributes.group(), equalTo(parentFileAttributes.group()));
-        Set<PosixFilePermission> expectedFilePermissions = new HashSet<>();
-        for (PosixFilePermission parentPermission : parentFileAttributes.permissions()) {
-            switch(parentPermission) {
-                case OWNER_EXECUTE:
-                case GROUP_EXECUTE:
-                case OTHERS_EXECUTE:
-                    break;
-                default:
-                    expectedFilePermissions.add(parentPermission);
-            }
-        }
-        assertThat(pluginConfigFileAttributes.permissions(), equalTo(expectedFilePermissions));
-    }
-
-    public void testBinDirectoryOwnerGroupAndPermissions() throws IOException {
-        assumeTrue("File system does not support permissions, skipping", supportsPermissions);
-        URL pluginUrl = createPlugin(true, false);
-        PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10));
-        pluginManager.downloadAndExtract(pluginName, terminal, true);
-        PosixFileAttributes parentFileAttributes = Files.getFileAttributeView(environment.binFile(), PosixFileAttributeView.class).readAttributes();
-        Path binPath = environment.binFile().resolve(pluginName);
-        PosixFileAttributes pluginBinDirAttributes = Files.getFileAttributeView(binPath, PosixFileAttributeView.class).readAttributes();
-        assertThat(pluginBinDirAttributes.owner(), equalTo(parentFileAttributes.owner()));
-        assertThat(pluginBinDirAttributes.group(), equalTo(parentFileAttributes.group()));
-        assertThat(pluginBinDirAttributes.permissions(), equalTo(parentFileAttributes.permissions()));
-        Path executableFile = binPath.resolve("my-binary");
-        PosixFileAttributes pluginExecutableFileAttributes = Files.getFileAttributeView(executableFile, PosixFileAttributeView.class).readAttributes();
-        assertThat(pluginExecutableFileAttributes.owner(), equalTo(parentFileAttributes.owner()));
-        assertThat(pluginExecutableFileAttributes.group(), equalTo(parentFileAttributes.group()));
-        Set<PosixFilePermission> expectedFilePermissions = new HashSet<>();
-        expectedFilePermissions.add(OWNER_EXECUTE);
-        expectedFilePermissions.add(GROUP_EXECUTE);
-        expectedFilePermissions.add(OTHERS_EXECUTE);
-        for (PosixFilePermission parentPermission : parentFileAttributes.permissions()) {
-            switch(parentPermission) {
-                case OWNER_EXECUTE:
-                case GROUP_EXECUTE:
-                case OTHERS_EXECUTE:
-                    break;
-                default:
-                    expectedFilePermissions.add(parentPermission);
-            }
-        }
-
-        assertThat(pluginExecutableFileAttributes.permissions(), equalTo(expectedFilePermissions));
-    }
-
-    private URL createPlugin(boolean withBinDir, boolean withConfigDir) throws IOException {
-        final Path structure = createTempDir().resolve("fake-plugin");
-        PluginTestUtil.writeProperties(structure, "description", "fake desc",
-            "version", "1.0",
-            "elasticsearch.version", Version.CURRENT.toString(),
-            "jvm", "true",
-            "java.version", "1.7",
-            "name", pluginName,
-            "classname", pluginName);
-        if (withBinDir) {
-            // create bin dir
-            Path binDir = structure.resolve("bin");
-            Files.createDirectory(binDir);
-            Files.setPosixFilePermissions(binDir, PosixFilePermissions.fromString("rwxr-xr-x"));
-
-            // create executable
-            Path executable = binDir.resolve("my-binary");
-            Files.createFile(executable);
-            Files.setPosixFilePermissions(executable, PosixFilePermissions.fromString("rw-r--r--"));
-        }
-        if (withConfigDir) {
-            // create bin dir
-            Path configDir = structure.resolve("config");
-            Files.createDirectory(configDir);
-            Files.setPosixFilePermissions(configDir, PosixFilePermissions.fromString("rwxr-xr-x"));
-
-            // create config file
-            Path configFile = configDir.resolve("my-custom-config.yaml");
-            Files.createFile(configFile);
-            Files.write(configFile, "my custom config content".getBytes(Charset.forName("UTF-8")));
-            Files.setPosixFilePermissions(configFile, PosixFilePermissions.fromString("rw-r--r--"));
-        }
-
-        Path zip = createTempDir().resolve(structure.getFileName() + ".zip");
-        try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) {
-            Files.walkFileTree(structure, new SimpleFileVisitor<Path>() {
-                @Override
-                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-                    stream.putNextEntry(new ZipEntry(structure.relativize(file).toString()));
-                    Files.copy(file, stream);
-                    return FileVisitResult.CONTINUE;
-                }
-            });
-        }
-        return zip.toUri().toURL();
-    }
-}
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java
deleted file mode 100644
index d997a167541..00000000000
--- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java
+++ /dev/null
@@ -1,725 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ -package org.elasticsearch.plugins; - -import org.apache.http.impl.client.HttpClients; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.Version; -import org.elasticsearch.common.Base64; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliTool.ExitStatus; -import org.elasticsearch.common.cli.CliToolTestCase.CaptureOutputTerminal; -import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.junit.annotations.Network; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; -import org.elasticsearch.test.rest.client.http.HttpResponse; -import org.jboss.netty.bootstrap.ServerBootstrap; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.ChannelPipeline; -import org.jboss.netty.channel.ChannelPipelineFactory; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.MessageEvent; -import org.jboss.netty.channel.SimpleChannelUpstreamHandler; -import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; -import org.jboss.netty.handler.codec.http.DefaultHttpResponse; -import org.jboss.netty.handler.codec.http.HttpRequest; -import org.jboss.netty.handler.codec.http.HttpRequestDecoder; -import org.jboss.netty.handler.codec.http.HttpResponseEncoder; -import org.jboss.netty.handler.codec.http.HttpResponseStatus; -import org.jboss.netty.handler.ssl.SslContext; -import org.jboss.netty.handler.ssl.SslHandler; -import org.jboss.netty.handler.ssl.util.InsecureTrustManagerFactory; -import org.jboss.netty.handler.ssl.util.SelfSignedCertificate; -import org.junit.After; -import org.junit.Before; - -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSocketFactory; -import java.io.BufferedWriter; -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.StandardOpenOption; -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFileAttributes; -import java.nio.file.attribute.PosixFilePermission; -import java.util.ArrayList; -import java.util.List; -import java.util.Locale; -import java.util.jar.JarOutputStream; -import java.util.zip.ZipEntry; -import java.util.zip.ZipOutputStream; - -import static org.elasticsearch.common.cli.CliTool.ExitStatus.USAGE; -import static org.elasticsearch.common.cli.CliToolTestCase.args; -import static org.elasticsearch.common.io.FileTestUtils.assertFileContent; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.test.ESIntegTestCase.Scope; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDirectoryExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; -import static 
org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.hasItem;
-import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.not;
-import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
-
-@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0)
-@LuceneTestCase.SuppressFileSystems("*") // TODO: clean up this test to allow extra files
-// TODO: jimfs is really broken here (throws wrong exception from detection method).
-// if its in your classpath, then do not use plugins!!!!!!
-@SuppressForbidden(reason = "modifies system properties intentionally")
-public class PluginManagerTests extends ESIntegTestCase {
-
-    private Environment environment;
-    private CaptureOutputTerminal terminal = new CaptureOutputTerminal();
-
-    @Before
-    public void setup() throws Exception {
-        environment = buildInitialSettings();
-        System.setProperty("es.default.path.home", Environment.PATH_HOME_SETTING.get(environment.settings()));
-        Path binDir = environment.binFile();
-        if (!Files.exists(binDir)) {
-            Files.createDirectories(binDir);
-        }
-        Path configDir = environment.configFile();
-        if (!Files.exists(configDir)) {
-            Files.createDirectories(configDir);
-        }
-    }
-
-    @After
-    public void clearPathHome() {
-        System.clearProperty("es.default.path.home");
-    }
-
-    private void writeSha1(Path file, boolean corrupt) throws IOException {
-        String sha1Hex = MessageDigests.toHexString(MessageDigests.sha1().digest(Files.readAllBytes(file)));
-        try (BufferedWriter out = Files.newBufferedWriter(file.resolveSibling(file.getFileName() + ".sha1"), StandardCharsets.UTF_8)) {
-            out.write(sha1Hex);
-            if (corrupt) {
-                out.write("bad");
-            }
-        }
-    }
-
-    private void writeMd5(Path file, boolean corrupt) throws IOException {
-        String md5Hex = MessageDigests.toHexString(MessageDigests.md5().digest(Files.readAllBytes(file)));
-        try (BufferedWriter out = Files.newBufferedWriter(file.resolveSibling(file.getFileName() + ".md5"), StandardCharsets.UTF_8)) {
-            out.write(md5Hex);
-            if (corrupt) {
-                out.write("bad");
-            }
-        }
-    }
-
-    /** creates a plugin .zip and returns the url for testing */
-    private String createPlugin(final Path structure, String... properties) throws IOException {
-        PluginTestUtil.writeProperties(structure, properties);
-        Path zip = createTempDir().resolve(structure.getFileName() + ".zip");
-        try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) {
-            Files.walkFileTree(structure, new SimpleFileVisitor<Path>() {
-                @Override
-                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-                    stream.putNextEntry(new ZipEntry(structure.relativize(file).toString()));
-                    Files.copy(file, stream);
-                    return FileVisitResult.CONTINUE;
-                }
-            });
-        }
-        if (randomBoolean()) {
-            writeSha1(zip, false);
-        } else if (randomBoolean()) {
-            writeMd5(zip, false);
-        }
-        return zip.toUri().toURL().toString();
-    }
-
-    /** creates a plugin .zip and bad checksum file and returns the url for testing */
-    private String createPluginWithBadChecksum(final Path structure, String... properties) throws IOException {
-        PluginTestUtil.writeProperties(structure, properties);
-        Path zip = createTempDir().resolve(structure.getFileName() + ".zip");
-        try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) {
-            Files.walkFileTree(structure, new SimpleFileVisitor<Path>() {
-                @Override
-                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-                    stream.putNextEntry(new ZipEntry(structure.relativize(file).toString()));
-                    Files.copy(file, stream);
-                    return FileVisitResult.CONTINUE;
-                }
-            });
-        }
-        if (randomBoolean()) {
-            writeSha1(zip, true);
-        } else {
-            writeMd5(zip, true);
-        }
-        return zip.toUri().toURL().toString();
-    }
-
-    public void testThatPluginNameMustBeSupplied() throws IOException {
-        Path pluginDir = createTempDir().resolve("fake-plugin");
-        String pluginUrl = createPlugin(pluginDir,
-            "description", "fake desc",
-            "name", "fake-plugin",
-            "version", "1.0",
-            "elasticsearch.version", Version.CURRENT.toString(),
-            "java.version", System.getProperty("java.specification.version"),
-            "classname", "FakePlugin");
-        assertStatus("install", USAGE);
-    }
-
-    public void testLocalPluginInstallWithBinAndConfig() throws Exception {
-        String pluginName = "fake-plugin";
-        Path pluginDir = createTempDir().resolve(pluginName);
-        // create bin/tool and config/file
-        Files.createDirectories(pluginDir.resolve("bin"));
-        Files.createFile(pluginDir.resolve("bin").resolve("tool"));
-        Files.createDirectories(pluginDir.resolve("config"));
-        Files.createFile(pluginDir.resolve("config").resolve("file"));
-
-        String pluginUrl = createPlugin(pluginDir,
-            "description", "fake desc",
-            "name", pluginName,
-            "version", "1.0",
-            "elasticsearch.version", Version.CURRENT.toString(),
-            "java.version", System.getProperty("java.specification.version"),
-            "classname", "FakePlugin");
-
-        Path binDir = environment.binFile();
-        Path pluginBinDir = binDir.resolve(pluginName);
-
-        Path pluginConfigDir = environment.configFile().resolve(pluginName);
-        assertStatusOk("install " + pluginUrl + " --verbose");
-
-        terminal.getTerminalOutput().clear();
-        assertStatusOk("list");
-        assertThat(terminal.getTerminalOutput(), hasItem(containsString(pluginName)));
-
-        assertDirectoryExists(pluginBinDir);
-        assertDirectoryExists(pluginConfigDir);
-        Path toolFile = pluginBinDir.resolve("tool");
-        assertFileExists(toolFile);
-
-        // check that the file is marked executable, without actually checking that we can execute it.
-        PosixFileAttributeView view = Files.getFileAttributeView(toolFile, PosixFileAttributeView.class);
-        // the view might be null, on e.g. windows, there is nothing to check there!
- if (view != null) { - PosixFileAttributes attributes = view.readAttributes(); - assertThat(attributes.permissions(), hasItem(PosixFilePermission.OWNER_EXECUTE)); - assertThat(attributes.permissions(), hasItem(PosixFilePermission.OWNER_READ)); - } - } - - /** - * Test for #7890 - */ - public void testLocalPluginInstallWithBinAndConfigInAlreadyExistingConfigDir_7890() throws Exception { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - // create config/test.txt with contents 'version1' - Files.createDirectories(pluginDir.resolve("config")); - Files.write(pluginDir.resolve("config").resolve("test.txt"), "version1".getBytes(StandardCharsets.UTF_8)); - - String pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - - Path pluginConfigDir = environment.configFile().resolve(pluginName); - - assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl)); - - /* - First time, our plugin contains: - - config/test.txt (version1) - */ - assertFileContent(pluginConfigDir, "test.txt", "version1"); - - // We now remove the plugin - assertStatusOk("remove " + pluginName); - - // We should still have test.txt - assertFileContent(pluginConfigDir, "test.txt", "version1"); - - // Installing a new plugin version - /* - Second time, our plugin contains: - - config/test.txt (version2) - - config/dir/testdir.txt (version1) - - config/dir/subdir/testsubdir.txt (version1) - */ - Files.write(pluginDir.resolve("config").resolve("test.txt"), "version2".getBytes(StandardCharsets.UTF_8)); - Files.createDirectories(pluginDir.resolve("config").resolve("dir").resolve("subdir")); - Files.write(pluginDir.resolve("config").resolve("dir").resolve("testdir.txt"), "version1".getBytes(StandardCharsets.UTF_8)); - Files.write(pluginDir.resolve("config").resolve("dir").resolve("subdir").resolve("testsubdir.txt"), "version1".getBytes(StandardCharsets.UTF_8)); - pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "2.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - - assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl)); - - assertFileContent(pluginConfigDir, "test.txt", "version1"); - assertFileContent(pluginConfigDir, "test.txt.new", "version2"); - assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1"); - assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1"); - - // Removing - assertStatusOk("remove " + pluginName); - assertFileContent(pluginConfigDir, "test.txt", "version1"); - assertFileContent(pluginConfigDir, "test.txt.new", "version2"); - assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1"); - assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1"); - - // Installing a new plugin version - /* - Third time, our plugin contains: - - config/test.txt (version3) - - config/test2.txt (version1) - - config/dir/testdir.txt (version2) - - config/dir/testdir2.txt (version1) - - config/dir/subdir/testsubdir.txt (version2) - */ - Files.write(pluginDir.resolve("config").resolve("test.txt"), "version3".getBytes(StandardCharsets.UTF_8)); - Files.write(pluginDir.resolve("config").resolve("test2.txt"), 
"version1".getBytes(StandardCharsets.UTF_8)); - Files.write(pluginDir.resolve("config").resolve("dir").resolve("testdir.txt"), "version2".getBytes(StandardCharsets.UTF_8)); - Files.write(pluginDir.resolve("config").resolve("dir").resolve("testdir2.txt"), "version1".getBytes(StandardCharsets.UTF_8)); - Files.write(pluginDir.resolve("config").resolve("dir").resolve("subdir").resolve("testsubdir.txt"), "version2".getBytes(StandardCharsets.UTF_8)); - pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "3.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "jvm", "true", - "classname", "FakePlugin"); - - assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl)); - - assertFileContent(pluginConfigDir, "test.txt", "version1"); - assertFileContent(pluginConfigDir, "test2.txt", "version1"); - assertFileContent(pluginConfigDir, "test.txt.new", "version3"); - assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1"); - assertFileContent(pluginConfigDir, "dir/testdir.txt.new", "version2"); - assertFileContent(pluginConfigDir, "dir/testdir2.txt", "version1"); - assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1"); - assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt.new", "version2"); - } - - // For #7152 - public void testLocalPluginInstallWithBinOnly_7152() throws Exception { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - // create bin/tool - Files.createDirectories(pluginDir.resolve("bin")); - Files.createFile(pluginDir.resolve("bin").resolve("tool"));; - String pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", "fake-plugin", - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - - Path binDir = environment.binFile(); - Path pluginBinDir = binDir.resolve(pluginName); - - assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl)); - assertThatPluginIsListed(pluginName); - assertDirectoryExists(pluginBinDir); - } - - public void testListInstalledEmpty() throws IOException { - assertStatusOk("list"); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("No plugin detected"))); - } - - public void testListInstalledEmptyWithExistingPluginDirectory() throws IOException { - Files.createDirectory(environment.pluginsFile()); - assertStatusOk("list"); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("No plugin detected"))); - } - - public void testInstallPluginVerbose() throws IOException { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - String pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - System.err.println("install " + pluginUrl + " --verbose"); - ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl + " --verbose")); - assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK)); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Name: fake-plugin"))); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Description: fake 
desc"))); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Version: 1.0"))); - assertThatPluginIsListed(pluginName); - } - - public void testInstallPlugin() throws IOException { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - String pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl)); - assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK)); - assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Name: fake-plugin")))); - assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Description:")))); - assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Site:")))); - assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Version:")))); - assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("JVM:")))); - assertThatPluginIsListed(pluginName); - } - - /** - * @deprecated support for this is not going to stick around, seriously. - */ - @Deprecated - public void testAlreadyInstalledNotIsolated() throws Exception { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - Files.createDirectories(pluginDir); - // create a jar file in the plugin - Path pluginJar = pluginDir.resolve("fake-plugin.jar"); - try (ZipOutputStream out = new JarOutputStream(Files.newOutputStream(pluginJar, StandardOpenOption.CREATE))) { - out.putNextEntry(new ZipEntry("foo.class")); - out.closeEntry(); - } - String pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "isolated", "false", - "classname", "FakePlugin"); - - // install - ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl)); - assertEquals("unexpected exit status: output: " + terminal.getTerminalOutput(), ExitStatus.OK, status); - - // install again - status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl)); - List output = terminal.getTerminalOutput(); - assertEquals("unexpected exit status: output: " + output, ExitStatus.IO_ERROR, status); - boolean foundExpectedMessage = false; - for (String line : output) { - foundExpectedMessage |= line.contains("already exists"); - } - assertTrue(foundExpectedMessage); - } - - public void testInstallPluginWithBadChecksum() throws IOException { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - String pluginUrl = createPluginWithBadChecksum(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - assertStatus(String.format(Locale.ROOT, "install %s --verbose", pluginUrl), - ExitStatus.IO_ERROR); - assertThatPluginIsNotListed(pluginName); - assertFileNotExists(environment.pluginsFile().resolve(pluginName)); - } - - private void singlePluginInstallAndRemove(String pluginDescriptor, String pluginName, String pluginCoordinates) throws IOException { - 
logger.info("--> trying to download and install [{}]", pluginDescriptor); - if (pluginCoordinates == null) { - assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginDescriptor)); - } else { - assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginCoordinates)); - } - assertThatPluginIsListed(pluginName); - - terminal.getTerminalOutput().clear(); - assertStatusOk("remove " + pluginDescriptor); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Removing " + pluginDescriptor))); - - // not listed anymore - terminal.getTerminalOutput().clear(); - assertStatusOk("list"); - assertThat(terminal.getTerminalOutput(), not(hasItem(containsString(pluginName)))); - } - - /** - * We are ignoring by default these tests as they require to have an internet access - * To activate the test, use -Dtests.network=true - * We test regular form: username/reponame/version - * It should find it in download.elasticsearch.org service - */ - @Network - @AwaitsFix(bugUrl = "fails with jar hell failures - http://build-us-00.elastic.co/job/es_core_master_oracle_6/519/testReport/") - public void testInstallPluginWithElasticsearchDownloadService() throws IOException { - assumeTrue("download.elastic.co is accessible", isDownloadServiceWorking("download.elastic.co", 80, "/elasticsearch/ci-test.txt")); - singlePluginInstallAndRemove("elasticsearch/elasticsearch-transport-thrift/2.4.0", "elasticsearch-transport-thrift", null); - } - - /** - * We are ignoring by default these tests as they require to have an internet access - * To activate the test, use -Dtests.network=true - * We test regular form: groupId/artifactId/version - * It should find it in maven central service - */ - @Network - @AwaitsFix(bugUrl = "fails with jar hell failures - http://build-us-00.elastic.co/job/es_core_master_oracle_6/519/testReport/") - public void testInstallPluginWithMavenCentral() throws IOException { - assumeTrue("search.maven.org is accessible", isDownloadServiceWorking("search.maven.org", 80, "/")); - assumeTrue("repo1.maven.org is accessible", isDownloadServiceWorking("repo1.maven.org", 443, "/maven2/org/elasticsearch/elasticsearch-transport-thrift/2.4.0/elasticsearch-transport-thrift-2.4.0.pom")); - singlePluginInstallAndRemove("org.elasticsearch/elasticsearch-transport-thrift/2.4.0", "elasticsearch-transport-thrift", null); - } - - /** - * We are ignoring by default these tests as they require to have an internet access - * To activate the test, use -Dtests.network=true - * We test site plugins from github: userName/repoName - * It should find it on github - */ - @Network @AwaitsFix(bugUrl = "needs to be adapted to 2.0") - public void testInstallPluginWithGithub() throws IOException { - assumeTrue("github.com is accessible", isDownloadServiceWorking("github.com", 443, "/")); - singlePluginInstallAndRemove("elasticsearch/kibana", "kibana", null); - } - - private boolean isDownloadServiceWorking(String host, int port, String resource) { - try { - String protocol = port == 443 ? "https" : "http"; - HttpResponse response = new HttpRequestBuilder(HttpClients.createDefault()).protocol(protocol).host(host).port(port).path(resource).execute(); - if (response.getStatusCode() != 200) { - logger.warn("[{}{}] download service is not working. Disabling current test.", host, resource); - return false; - } - return true; - } catch (Throwable t) { - logger.warn("[{}{}] download service is not working. 
Disabling current test.", host, resource); - } - return false; - } - - public void testRemovePlugin() throws Exception { - String pluginName = "plugintest"; - Path pluginDir = createTempDir().resolve(pluginName); - String pluginUrl = createPlugin(pluginDir, - "description", "fake desc", - "name", pluginName, - "version", "1.0.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - - // We want to remove plugin with plugin short name - singlePluginInstallAndRemove("plugintest", "plugintest", pluginUrl); - - // We want to remove plugin with groupid/artifactid/version form - singlePluginInstallAndRemove("groupid/plugintest/1.0.0", "plugintest", pluginUrl); - - // We want to remove plugin with groupid/artifactid form - singlePluginInstallAndRemove("groupid/plugintest", "plugintest", pluginUrl); - } - - public void testRemovePlugin_NullName_ThrowsException() throws IOException { - assertStatus("remove ", USAGE); - } - - public void testRemovePluginWithURLForm() throws Exception { - assertStatus("remove file://whatever", USAGE); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Illegal plugin name"))); - } - - public void testForbiddenPluginNames() throws IOException { - assertStatus("remove elasticsearch", USAGE); - assertStatus("remove elasticsearch.bat", USAGE); - assertStatus("remove elasticsearch.in.sh", USAGE); - assertStatus("remove plugin", USAGE); - assertStatus("remove plugin.bat", USAGE); - assertStatus("remove service.bat", USAGE); - assertStatus("remove ELASTICSEARCH", USAGE); - assertStatus("remove ELASTICSEARCH.IN.SH", USAGE); - } - - public void testOfficialPluginName_ThrowsException() throws IOException { - PluginManager.checkForOfficialPlugins("analysis-icu"); - PluginManager.checkForOfficialPlugins("analysis-kuromoji"); - PluginManager.checkForOfficialPlugins("analysis-phonetic"); - PluginManager.checkForOfficialPlugins("analysis-smartcn"); - PluginManager.checkForOfficialPlugins("analysis-stempel"); - PluginManager.checkForOfficialPlugins("delete-by-query"); - PluginManager.checkForOfficialPlugins("lang-javascript"); - PluginManager.checkForOfficialPlugins("lang-painless"); - PluginManager.checkForOfficialPlugins("lang-python"); - PluginManager.checkForOfficialPlugins("mapper-attachments"); - PluginManager.checkForOfficialPlugins("mapper-murmur3"); - PluginManager.checkForOfficialPlugins("mapper-size"); - PluginManager.checkForOfficialPlugins("discovery-multicast"); - PluginManager.checkForOfficialPlugins("discovery-azure"); - PluginManager.checkForOfficialPlugins("discovery-ec2"); - PluginManager.checkForOfficialPlugins("discovery-gce"); - PluginManager.checkForOfficialPlugins("repository-azure"); - PluginManager.checkForOfficialPlugins("repository-s3"); - PluginManager.checkForOfficialPlugins("store-smb"); - - try { - PluginManager.checkForOfficialPlugins("elasticsearch-mapper-attachment"); - fail("elasticsearch-mapper-attachment should not be allowed"); - } catch (IllegalArgumentException e) { - // We expect that error - } - } - - public void testThatBasicAuthIsRejectedOnHttp() throws Exception { - assertStatus(String.format(Locale.ROOT, "install http://user:pass@localhost:12345/foo.zip --verbose"), CliTool.ExitStatus.IO_ERROR); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Basic auth is only supported for HTTPS!"))); - } - - public void testThatBasicAuthIsSupportedWithHttps() throws Exception { - assumeTrue("test requires security 
manager to be disabled", System.getSecurityManager() == null); - - SSLSocketFactory defaultSocketFactory = HttpsURLConnection.getDefaultSSLSocketFactory(); - ServerBootstrap serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory()); - SelfSignedCertificate ssc = null; - - try { - try { - ssc = new SelfSignedCertificate("localhost"); - } catch (Exception e) { - assumeNoException("self signing shenanigans not supported by this JDK", e); - } - - // Create a trust manager that does not validate certificate chains: - SSLContext sc = SSLContext.getInstance("SSL"); - sc.init(null, InsecureTrustManagerFactory.INSTANCE.getTrustManagers(), null); - HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); - - final List requests = new ArrayList<>(); - final SslContext sslContext = SslContext.newServerContext(ssc.certificate(), ssc.privateKey()); - - serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() { - @Override - public ChannelPipeline getPipeline() throws Exception { - return Channels.pipeline( - new SslHandler(sslContext.newEngine()), - new HttpRequestDecoder(), - new HttpResponseEncoder(), - new LoggingServerHandler(requests) - ); - } - }); - - Channel channel = serverBootstrap.bind(new InetSocketAddress(InetAddress.getByName("localhost"), 0)); - int port = ((InetSocketAddress) channel.getLocalAddress()).getPort(); - // IO_ERROR because there is no real file delivered... - assertStatus(String.format(Locale.ROOT, "install https://user:pass@localhost:%s/foo.zip --verbose --timeout 10s", port), ExitStatus.IO_ERROR); - - // ensure that we did not try any other data source like download.elastic.co, in case we specified our own local URL - assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("download.elastic.co")))); - - assertThat(requests, hasSize(1)); - String msg = String.format(Locale.ROOT, "Request header did not contain Authorization header, terminal output was: %s", terminal.getTerminalOutput()); - assertThat(msg, requests.get(0).headers().contains("Authorization"), is(true)); - assertThat(msg, requests.get(0).headers().get("Authorization"), is("Basic " + Base64.encodeBytes("user:pass".getBytes(StandardCharsets.UTF_8)))); - } finally { - HttpsURLConnection.setDefaultSSLSocketFactory(defaultSocketFactory); - serverBootstrap.releaseExternalResources(); - if (ssc != null) { - ssc.delete(); - } - } - } - - private static class LoggingServerHandler extends SimpleChannelUpstreamHandler { - - private List requests; - - public LoggingServerHandler(List requests) { - this.requests = requests; - } - - @Override - public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws InterruptedException { - final HttpRequest request = (HttpRequest) e.getMessage(); - requests.add(request); - final org.jboss.netty.handler.codec.http.HttpResponse response = new DefaultHttpResponse(HTTP_1_1, HttpResponseStatus.BAD_REQUEST); - ctx.getChannel().write(response); - } - } - - - - private Environment buildInitialSettings() throws IOException { - Settings settings = settingsBuilder() - .put("http.enabled", true) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); - return InternalSettingsPreparer.prepareEnvironment(settings, null); - } - - private void assertStatusOk(String command) { - assertStatus(command, ExitStatus.OK); - } - - private void assertStatus(String command, ExitStatus exitStatus) { - ExitStatus status = new PluginManagerCliParser(terminal).execute(args(command)); - assertThat("Terminal output was: " + 
terminal.getTerminalOutput(), status, is(exitStatus)); - } - - private void assertThatPluginIsListed(String pluginName) { - terminal.getTerminalOutput().clear(); - assertStatusOk("list"); - String message = String.format(Locale.ROOT, "Terminal output was: %s", terminal.getTerminalOutput()); - assertThat(message, terminal.getTerminalOutput(), hasItem(containsString(pluginName))); - } - - private void assertThatPluginIsNotListed(String pluginName) { - terminal.getTerminalOutput().clear(); - assertStatusOk("list"); - String message = String.format(Locale.ROOT, "Terminal output was: %s", terminal.getTerminalOutput()); - assertFalse(message, terminal.getTerminalOutput().contains(pluginName)); - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java deleted file mode 100644 index 49edcc7b1d4..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-
-package org.elasticsearch.plugins;
-
-import org.elasticsearch.Build;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.http.client.HttpDownloadHelper;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.test.ESTestCase;
-import org.junit.After;
-
-import java.io.IOException;
-import java.net.URL;
-import java.nio.charset.Charset;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Locale;
-
-import static org.elasticsearch.common.settings.Settings.settingsBuilder;
-import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.is;
-
-/**
- *
- */
-@SuppressForbidden(reason = "modifies system properties intentionally")
-public class PluginManagerUnitTests extends ESTestCase {
-    @After
-    public void cleanSystemProperty() {
-        System.clearProperty(PluginManager.PROPERTY_SUPPORT_STAGING_URLS);
-    }
-
-    public void testThatConfigDirectoryCanBeOutsideOfElasticsearchHomeDirectory() throws IOException {
-        String pluginName = randomAsciiOfLength(10);
-        Path homeFolder = createTempDir();
-        Path genericConfigFolder = createTempDir();
-
-        Settings settings = settingsBuilder()
-            .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder)
-            .put(Environment.PATH_HOME_SETTING.getKey(), homeFolder)
-            .build();
-        Environment environment = new Environment(settings);
-
-        PluginManager.PluginHandle pluginHandle = new PluginManager.PluginHandle(pluginName, "version", "user");
-        Path configDirPath = pluginHandle.configDir(environment).normalize();
-        Path expectedDirPath = genericConfigFolder.resolve(pluginName).normalize();
-        assertEquals(configDirPath, expectedDirPath);
-    }
-
-    public void testSimplifiedNaming() throws IOException {
-        String pluginName = randomAsciiOfLength(10);
-        PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(pluginName);
-
-        boolean supportStagingUrls = randomBoolean();
-        if (supportStagingUrls) {
-            System.setProperty(PluginManager.PROPERTY_SUPPORT_STAGING_URLS, "true");
-        }
-
-        Iterator<URL> iterator = handle.urls().iterator();
-
-        if (supportStagingUrls) {
-            String expectedStagingURL = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip",
-                Version.CURRENT.number(), Build.CURRENT.shortHash(), pluginName, Version.CURRENT.number(), pluginName, Version.CURRENT.number());
-            assertThat(iterator.next().toExternalForm(), is(expectedStagingURL));
-        }
-
-        URL expected = new URL("https", "download.elastic.co", "/elasticsearch/release/org/elasticsearch/plugin/" + pluginName + "/" + Version.CURRENT.number() + "/" +
-            pluginName + "-" + Version.CURRENT.number() + ".zip");
-        assertThat(iterator.next().toExternalForm(), is(expected.toExternalForm()));
-
-        assertThat(iterator.hasNext(), is(false));
-    }
-
-    public void testOfficialPluginName() throws IOException {
-        String randomPluginName = randomFrom(new ArrayList<>(PluginManager.OFFICIAL_PLUGINS));
-        PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(randomPluginName);
-        assertThat(handle.name, is(randomPluginName));
-
-        boolean supportStagingUrls = randomBoolean();
-        if (supportStagingUrls) {
-            System.setProperty(PluginManager.PROPERTY_SUPPORT_STAGING_URLS, "true");
-        }
-
-        Iterator<URL> iterator = handle.urls().iterator();
-
-        if (supportStagingUrls) {
-            String expectedStagingUrl = String.format(Locale.ROOT,
"https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", - Version.CURRENT.number(), Build.CURRENT.shortHash(), randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number()); - assertThat(iterator.next().toExternalForm(), is(expectedStagingUrl)); - } - - String releaseUrl = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", - randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number()); - assertThat(iterator.next().toExternalForm(), is(releaseUrl)); - - assertThat(iterator.hasNext(), is(false)); - } - - public void testGithubPluginName() throws IOException { - String user = randomAsciiOfLength(6); - String pluginName = randomAsciiOfLength(10); - PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(user + "/" + pluginName); - assertThat(handle.name, is(pluginName)); - assertThat(handle.urls(), hasSize(1)); - assertThat(handle.urls().get(0).toExternalForm(), is(new URL("https", "github.com", "/" + user + "/" + pluginName + "/" + "archive/master.zip").toExternalForm())); - } - - public void testDownloadHelperChecksums() throws Exception { - // Sanity check to make sure the checksum functions never change how they checksum things - assertEquals("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", - HttpDownloadHelper.SHA1_CHECKSUM.checksum("foo".getBytes(Charset.forName("UTF-8")))); - assertEquals("acbd18db4cc2f85cedef654fccc4a4d8", - HttpDownloadHelper.MD5_CHECKSUM.checksum("foo".getBytes(Charset.forName("UTF-8")))); - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java new file mode 100644 index 00000000000..10fbc3c2696 --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.common.cli.CliTool; +import org.elasticsearch.common.cli.CliToolTestCase; +import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; + +@LuceneTestCase.SuppressFileSystems("*") +public class RemovePluginCommandTests extends ESTestCase { + + /** Creates a test environment with bin, config and plugins directories. 
*/
+    static Environment createEnv() throws IOException {
+        Path home = createTempDir();
+        Files.createDirectories(home.resolve("bin"));
+        Files.createFile(home.resolve("bin").resolve("elasticsearch"));
+        Files.createDirectories(home.resolve("plugins"));
+        Settings settings = Settings.builder()
+            .put("path.home", home)
+            .build();
+        return new Environment(settings);
+    }
+
+    static CliToolTestCase.CaptureOutputTerminal removePlugin(String name, Environment env) throws Exception {
+        CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(Terminal.Verbosity.VERBOSE);
+        CliTool.ExitStatus status = new RemovePluginCommand(terminal, name).execute(env.settings(), env);
+        assertEquals(CliTool.ExitStatus.OK, status);
+        return terminal;
+    }
+
+    static void assertRemoveCleaned(Environment env) throws IOException {
+        try (DirectoryStream<Path> stream = Files.newDirectoryStream(env.pluginsFile())) {
+            for (Path file : stream) {
+                if (file.getFileName().toString().startsWith(".removing")) {
+                    fail("Removal dir still exists, " + file);
+                }
+            }
+        }
+    }
+
+    public void testMissing() throws Exception {
+        Environment env = createEnv();
+        UserError e = expectThrows(UserError.class, () -> {
+            removePlugin("dne", env);
+        });
+        assertTrue(e.getMessage(), e.getMessage().contains("Plugin dne not found"));
+        assertRemoveCleaned(env);
+    }
+
+    public void testBasic() throws Exception {
+        Environment env = createEnv();
+        Files.createDirectory(env.pluginsFile().resolve("fake"));
+        Files.createFile(env.pluginsFile().resolve("fake").resolve("plugin.jar"));
+        Files.createDirectory(env.pluginsFile().resolve("fake").resolve("subdir"));
+        Files.createDirectory(env.pluginsFile().resolve("other"));
+        removePlugin("fake", env);
+        assertFalse(Files.exists(env.pluginsFile().resolve("fake")));
+        assertTrue(Files.exists(env.pluginsFile().resolve("other")));
+        assertRemoveCleaned(env);
+    }
+
+    public void testBin() throws Exception {
+        Environment env = createEnv();
+        Files.createDirectories(env.pluginsFile().resolve("fake"));
+        Path binDir = env.binFile().resolve("fake");
+        Files.createDirectories(binDir);
+        Files.createFile(binDir.resolve("somescript"));
+        removePlugin("fake", env);
+        assertFalse(Files.exists(env.pluginsFile().resolve("fake")));
+        assertTrue(Files.exists(env.binFile().resolve("elasticsearch")));
+        assertFalse(Files.exists(binDir));
+        assertRemoveCleaned(env);
+    }
+
+    public void testBinNotDir() throws Exception {
+        Environment env = createEnv();
+        Files.createDirectories(env.pluginsFile().resolve("elasticsearch"));
+        UserError e = expectThrows(UserError.class, () -> {
+            removePlugin("elasticsearch", env);
+        });
+        assertTrue(e.getMessage(), e.getMessage().contains("not a directory"));
+        assertTrue(Files.exists(env.pluginsFile().resolve("elasticsearch"))); // did not remove
+        assertTrue(Files.exists(env.binFile().resolve("elasticsearch")));
+        assertRemoveCleaned(env);
+    }
+}
diff --git a/qa/ingest-disabled/build.gradle b/qa/smoke-test-ingest-disabled/build.gradle
similarity index 100%
rename from qa/ingest-disabled/build.gradle
rename to qa/smoke-test-ingest-disabled/build.gradle
diff --git a/qa/ingest-disabled/src/test/java/org/elasticsearch/smoketest/IngestDisabledIT.java b/qa/smoke-test-ingest-disabled/src/test/java/org/elasticsearch/smoketest/IngestDisabledIT.java
similarity index 100%
rename from qa/ingest-disabled/src/test/java/org/elasticsearch/smoketest/IngestDisabledIT.java
rename to qa/smoke-test-ingest-disabled/src/test/java/org/elasticsearch/smoketest/IngestDisabledIT.java
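The new RemovePluginCommandTests above check, via assertRemoveCleaned, that no directory whose name starts with ".removing" survives a successful removal. A minimal sketch of the staged-removal pattern that check implies is below; removePluginStaged and its use of Files.move/Files.walk are illustrative assumptions for this sketch, not the actual RemovePluginCommand implementation.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Comparator;
    import java.util.stream.Stream;

    class StagedRemovalSketch {
        // Hypothetical helper: stage the plugin directory under a ".removing-"
        // name before deleting it, so an interrupted removal leaves a marker
        // directory rather than a half-deleted plugin. The ".removing" prefix
        // matches the one assertRemoveCleaned scans for.
        static void removePluginStaged(Path pluginsDir, String name) throws IOException {
            Path pluginDir = pluginsDir.resolve(name);
            if (Files.exists(pluginDir) == false) {
                throw new IllegalArgumentException("Plugin " + name + " not found");
            }
            Path staging = pluginsDir.resolve(".removing-" + name);
            Files.move(pluginDir, staging); // rename first; a crash here leaves a detectable marker
            try (Stream<Path> paths = Files.walk(staging)) {
                Path[] bottomUp = paths.sorted(Comparator.reverseOrder()).toArray(Path[]::new);
                for (Path p : bottomUp) {
                    Files.delete(p); // delete children before parents
                }
            }
        }
    }

The rename-then-delete ordering is what makes a leftover ".removing-" entry meaningful: a later run can recognize and clean up the staging directory, which is exactly the leftover state assertRemoveCleaned guards against after a successful removal.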
diff --git a/qa/ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yaml b/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yaml
similarity index 100%
rename from qa/ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yaml
rename to qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yaml
diff --git a/qa/ingest-with-mustache/build.gradle b/qa/smoke-test-ingest-with-all-dependencies/build.gradle
similarity index 85%
rename from qa/ingest-with-mustache/build.gradle
rename to qa/smoke-test-ingest-with-all-dependencies/build.gradle
index e5ca482d85a..118e36db012 100644
--- a/qa/ingest-with-mustache/build.gradle
+++ b/qa/smoke-test-ingest-with-all-dependencies/build.gradle
@@ -20,5 +20,7 @@ apply plugin: 'elasticsearch.rest-test'

 dependencies {
+  testCompile project(path: ':modules:ingest-grok', configuration: 'runtime')
+  testCompile project(path: ':plugins:ingest-geoip', configuration: 'runtime')
   testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
 }
diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/AbstractMustacheTests.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/AbstractMustacheTests.java
similarity index 100%
rename from qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/AbstractMustacheTests.java
rename to qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/AbstractMustacheTests.java
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/CombineProcessorsTests.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/CombineProcessorsTests.java
new file mode 100644
index 00000000000..0245233a159
--- /dev/null
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/CombineProcessorsTests.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest;
+
+import com.maxmind.geoip2.DatabaseReader;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.ingest.core.CompoundProcessor;
+import org.elasticsearch.ingest.core.IngestDocument;
+import org.elasticsearch.ingest.core.Pipeline;
+import org.elasticsearch.ingest.core.Processor;
+import org.elasticsearch.ingest.geoip.GeoIpProcessor;
+import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin;
+import org.elasticsearch.ingest.grok.GrokProcessor;
+import org.elasticsearch.ingest.grok.IngestGrokPlugin;
+import org.elasticsearch.ingest.processor.AppendProcessor;
+import org.elasticsearch.ingest.processor.ConvertProcessor;
+import org.elasticsearch.ingest.processor.DateProcessor;
+import org.elasticsearch.ingest.processor.LowercaseProcessor;
+import org.elasticsearch.ingest.processor.RemoveProcessor;
+import org.elasticsearch.ingest.processor.RenameProcessor;
+import org.elasticsearch.ingest.processor.SplitProcessor;
+import org.elasticsearch.ingest.processor.TrimProcessor;
+import org.elasticsearch.ingest.processor.UppercaseProcessor;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.StreamsUtils;
+
+import java.io.ByteArrayInputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class CombineProcessorsTests extends ESTestCase {
+
+    private static final String LOG = "70.193.17.92 - - [08/Sep/2014:02:54:42 +0000] \"GET /presentations/logstash-scale11x/images/ahhh___rage_face_by_samusmmx-d5g5zap.png HTTP/1.1\" 200 175208 \"http://mobile.rivals.com/board_posts.asp?SID=880&mid=198829575&fid=2208&tid=198829575&Team=&TeamId=&SiteId=\" \"Mozilla/5.0 (Linux; Android 4.2.2; VS980 4G Build/JDQ39B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.135 Mobile Safari/537.36\"";
+
+    public void testLogging() throws Exception {
+        Path configDir = createTempDir();
+        Path geoIpConfigDir = configDir.resolve("ingest-geoip");
+        Files.createDirectories(geoIpConfigDir);
+        Files.copy(new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), geoIpConfigDir.resolve("GeoLite2-City.mmdb"));
+        Map<String, DatabaseReader> databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir);
+
+        Map<String, Object> config = new HashMap<>();
+        config.put("field", "log");
+        config.put("pattern", "%{COMBINEDAPACHELOG}");
+        Processor processor1 = new GrokProcessor.Factory(IngestGrokPlugin.loadBuiltinPatterns()).doCreate(null, config);
+        config = new HashMap<>();
+        config.put("field", "response");
+        config.put("type", "integer");
+        Processor processor2 = new ConvertProcessor.Factory().create(config);
+        config = new HashMap<>();
+        config.put("field", "bytes");
+        config.put("type", "integer");
+        Processor processor3 = new ConvertProcessor.Factory().create(config);
+        config = new HashMap<>();
+        config.put("match_field", "timestamp");
+        config.put("target_field", "timestamp");
+        config.put("match_formats", Arrays.asList("dd/MMM/YYYY:HH:mm:ss Z"));
+        Processor processor4 = new DateProcessor.Factory().create(config);
+        config = new HashMap<>();
config.put("source_field", "clientip"); + Processor processor5 = new GeoIpProcessor.Factory(databaseReaders).create(config); + + Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor1, processor2, processor3, processor4, processor5)); + + Map source = new HashMap<>(); + source.put("log", LOG); + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, source); + pipeline.execute(document); + + assertThat(document.getSourceAndMetadata().size(), equalTo(17)); + assertThat(document.getSourceAndMetadata().get("request"), equalTo("/presentations/logstash-scale11x/images/ahhh___rage_face_by_samusmmx-d5g5zap.png")); + assertThat(document.getSourceAndMetadata().get("agent"), equalTo("\"Mozilla/5.0 (Linux; Android 4.2.2; VS980 4G Build/JDQ39B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.135 Mobile Safari/537.36\"")); + assertThat(document.getSourceAndMetadata().get("auth"), equalTo("-")); + assertThat(document.getSourceAndMetadata().get("ident"), equalTo("-")); + assertThat(document.getSourceAndMetadata().get("verb"), equalTo("GET")); + assertThat(document.getSourceAndMetadata().get("referrer"), equalTo("\"http://mobile.rivals.com/board_posts.asp?SID=880&mid=198829575&fid=2208&tid=198829575&Team=&TeamId=&SiteId=\"")); + assertThat(document.getSourceAndMetadata().get("response"), equalTo(200)); + assertThat(document.getSourceAndMetadata().get("bytes"), equalTo(175208)); + assertThat(document.getSourceAndMetadata().get("clientip"), equalTo("70.193.17.92")); + assertThat(document.getSourceAndMetadata().get("httpversion"), equalTo("1.1")); + assertThat(document.getSourceAndMetadata().get("rawrequest"), nullValue()); + assertThat(document.getSourceAndMetadata().get("timestamp"), equalTo("2014-09-08T02:54:42.000Z")); + Map geoInfo = (Map) document.getSourceAndMetadata().get("geoip"); + assertThat(geoInfo.size(), equalTo(5)); + assertThat(geoInfo.get("continent_name"), equalTo("North America")); + assertThat(geoInfo.get("city_name"), equalTo("Charlotte")); + assertThat(geoInfo.get("country_iso_code"), equalTo("US")); + assertThat(geoInfo.get("region_name"), equalTo("North Carolina")); + assertThat(geoInfo.get("location"), notNullValue()); + } + + private static final String PERSON = "{\n" + + " \"age\": 33,\n" + + " \"eyeColor\": \"brown\",\n" + + " \"name\": \"Miranda Goodwin\",\n" + + " \"gender\": \"male\",\n" + + " \"company\": \"ATGEN\",\n" + + " \"email\": \"mirandagoodwin@atgen.com\",\n" + + " \"phone\": \"+1 (914) 489-3656\",\n" + + " \"address\": \"713 Bartlett Place, Accoville, Puerto Rico, 9221\",\n" + + " \"registered\": \"2014-11-23T08:34:21 -01:00\",\n" + + " \"tags\": [\n" + + " \"ex\",\n" + + " \"do\",\n" + + " \"occaecat\",\n" + + " \"reprehenderit\",\n" + + " \"anim\",\n" + + " \"laboris\",\n" + + " \"cillum\"\n" + + " ],\n" + + " \"friends\": [\n" + + " {\n" + + " \"id\": 0,\n" + + " \"name\": \"Wendi Odonnell\"\n" + + " },\n" + + " {\n" + + " \"id\": 1,\n" + + " \"name\": \"Mayra Boyd\"\n" + + " },\n" + + " {\n" + + " \"id\": 2,\n" + + " \"name\": \"Lee Gonzalez\"\n" + + " }\n" + + " ]\n" + + " }"; + + @SuppressWarnings("unchecked") + public void testMutate() throws Exception { + Map config = new HashMap<>(); + // TODO: when we add foreach processor we should delete all friends.id fields + config.put("field", "friends.0.id"); + RemoveProcessor processor1 = new RemoveProcessor.Factory(TestTemplateService.instance()).create(config); + config = new HashMap<>(); + config.put("field", "tags"); + 
config.put("value", "new_value"); + AppendProcessor processor2 = new AppendProcessor.Factory(TestTemplateService.instance()).create(config); + config = new HashMap<>(); + config.put("field", "address"); + config.put("separator", ","); + SplitProcessor processor3 = new SplitProcessor.Factory().create(config); + config = new HashMap<>(); + // TODO: when we add foreach processor, then change the test to trim all address values + config.put("field", "address.1"); + TrimProcessor processor4 = new TrimProcessor.Factory().create(config); + config = new HashMap<>(); + config.put("field", "company"); + LowercaseProcessor processor5 = new LowercaseProcessor.Factory().create(config); + config = new HashMap<>(); + config.put("field", "gender"); + UppercaseProcessor processor6 = new UppercaseProcessor.Factory().create(config); + config = new HashMap<>(); + config.put("field", "eyeColor"); + config.put("to", "eye_color"); + RenameProcessor processor7 = new RenameProcessor.Factory().create(config); + Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor( + processor1, processor2, processor3, processor4, processor5, processor6, processor7 + )); + + Map source = XContentHelper.createParser(new BytesArray(PERSON)).map(); + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, source); + pipeline.execute(document); + + assertThat(((List>) document.getSourceAndMetadata().get("friends")).get(0).get("id"), nullValue()); + assertThat(((List>) document.getSourceAndMetadata().get("friends")).get(1).get("id"), equalTo(1)); + assertThat(((List>) document.getSourceAndMetadata().get("friends")).get(2).get("id"), equalTo(2)); + assertThat(document.getFieldValue("tags.7", String.class), equalTo("new_value")); + + List addressDetails = document.getFieldValue("address", List.class); + assertThat(addressDetails.size(), equalTo(4)); + assertThat(addressDetails.get(0), equalTo("713 Bartlett Place")); + assertThat(addressDetails.get(1), equalTo("Accoville")); + assertThat(addressDetails.get(2), equalTo(" Puerto Rico")); + assertThat(addressDetails.get(3), equalTo(" 9221")); + + assertThat(document.getSourceAndMetadata().get("company"), equalTo("atgen")); + assertThat(document.getSourceAndMetadata().get("gender"), equalTo("MALE")); + assertThat(document.getSourceAndMetadata().get("eye_color"), equalTo("brown")); + } + +} diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java similarity index 100% rename from qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java rename to qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/IngestMustacheRemoveProcessorIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheRemoveProcessorIT.java similarity index 100% rename from qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/IngestMustacheRemoveProcessorIT.java rename to qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheRemoveProcessorIT.java diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java 
diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java
similarity index 100%
rename from qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java
rename to qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java
diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/TemplateServiceIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/TemplateServiceIT.java
similarity index 100%
rename from qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/TemplateServiceIT.java
rename to qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/TemplateServiceIT.java
diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java
similarity index 100%
rename from qa/ingest-with-mustache/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java
rename to qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java
diff --git a/qa/ingest-with-mustache/src/test/java/org/elasticsearch/smoketest/IngestWithMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/smoketest/IngestWithMustacheIT.java
similarity index 100%
rename from qa/ingest-with-mustache/src/test/java/org/elasticsearch/smoketest/IngestWithMustacheIT.java
rename to qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/smoketest/IngestWithMustacheIT.java
diff --git a/qa/ingest-with-mustache/src/test/resources/rest-api-spec/test/ingest_mustache/10_pipeline_with_mustache_templates.yaml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest_mustache/10_pipeline_with_mustache_templates.yaml
similarity index 93%
rename from qa/ingest-with-mustache/src/test/resources/rest-api-spec/test/ingest_mustache/10_pipeline_with_mustache_templates.yaml
rename to qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest_mustache/10_pipeline_with_mustache_templates.yaml
index 9e644773c6a..491e1dae292 100644
--- a/qa/ingest-with-mustache/src/test/resources/rest-api-spec/test/ingest_mustache/10_pipeline_with_mustache_templates.yaml
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest_mustache/10_pipeline_with_mustache_templates.yaml
@@ -185,12 +185,13 @@
           "processors": [
             {
               "remove" : {
+                "tag" : "first_processor",
                 "field" : "field_to_remove",
                 "on_failure" : [
                   {
                     "set" : {
                       "field" : "error",
-                      "value" : "processor [{{ _ingest.on_failure_processor }}]: {{ _ingest.on_failure_message }}"
+                      "value" : "processor {{ _ingest.on_failure_processor_tag }} [{{ _ingest.on_failure_processor_type }}]: {{ _ingest.on_failure_message }}"
                     }
                   }
                 ]
@@ -217,4 +218,4 @@
         id: 1
   - length: { _source: 2 }
   - match: { _source.do_nothing: "foo" }
-  - match: { _source.error: "processor [remove]: field [field_to_remove] not present as part of path [field_to_remove]" }
+  - match: { _source.error: "processor first_processor [remove]: field [field_to_remove] not present as part of path [field_to_remove]" }
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash
index 9889048e973..18a93711ff9 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash
@@ -219,10 +219,6 @@ fi
     install_and_check_plugin discovery ec2 aws-java-sdk-core-*.jar
 }

-@test "[$GROUP] install multicast discovery plugin" {
-    install_and_check_plugin discovery multicast
-}
-
 @test "[$GROUP] install lang-expression plugin" {
     install_and_check_plugin lang expression
 }
@@ -325,10 +321,6 @@ fi
     remove_plugin discovery-ec2
 }

-@test "[$GROUP] remove multicast discovery plugin" {
-    remove_plugin discovery-multicast
-}
-
 @test "[$GROUP] remove lang-expression plugin" {
     remove_plugin lang-expression
 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
index 37a04cbae28..20fc3524283 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
@@ -82,10 +82,6 @@
         "type": "enum",
         "options": ["internal", "force"],
         "description": "Specific version type"
-      },
-      "detect_noop": {
-        "type": "boolean",
-        "description": "Specifying as true will cause Elasticsearch to check if there are changes and, if there aren’t, turn the update request into a noop."
       }
     }
   },
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_crud.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_crud.yaml
index 74808331446..c9d7fd451f5 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_crud.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_crud.yaml
@@ -53,6 +53,7 @@
 ---
 "Test invalid processor config":
   - do:
+      catch: request
       ingest.put_pipeline:
         id: "my_pipeline"
         body:  >
@@ -66,12 +67,11 @@
             }
           ]
          }
-  - match: { "acknowledged": false }
-  - length: { "error": 4 }
-  - match: { "error.reason": "[field] required property is missing" }
-  - match: { "error.property_name": "field" }
-  - match: { "error.type": "set" }
-  - match: { "error.tag": "fritag" }
+  - match: { error.root_cause.0.type: "parse_exception" }
+  - match: { error.root_cause.0.reason: "[field] required property is missing" }
+  - match: { error.root_cause.0.header.processor_tag: "fritag" }
+  - match: { error.root_cause.0.header.processor_type: "set" }
+  - match: { error.root_cause.0.header.property_name: "field" }

 ---
 "Test basic pipeline with on_failure in processor":
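The rewritten assertions here and in 40_simulate.yaml below track a behavioral change: an invalid processor config now fails the request with an ordinary parse_exception whose response headers carry processor_tag, processor_type and property_name, instead of an ad-hoc error object. The general shape of such a header-carrying exception (a hypothetical minimal version for illustration; the real mechanism is Elasticsearch's exception header support, and the names below are not its API):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical sketch: an exception whose headers a REST layer could
    // serialize under error.root_cause.0.header.*, alongside type and reason.
    class HeaderedParseException extends RuntimeException {
        private final Map<String, List<String>> headers = new HashMap<>();

        HeaderedParseException(String reason, String processorType, String processorTag, String propertyName) {
            super(reason);
            if (processorType != null) {
                headers.put("processor_type", Collections.singletonList(processorType));
            }
            if (processorTag != null) {
                headers.put("processor_tag", Collections.singletonList(processorTag));
            }
            if (propertyName != null) {
                headers.put("property_name", Collections.singletonList(propertyName));
            }
        }

        Map<String, List<String>> getHeaders() {
            return Collections.unmodifiableMap(headers);
        }
    }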
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/40_simulate.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/40_simulate.yaml
index 92fad242db9..288806a5f97 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/40_simulate.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/40_simulate.yaml
@@ -98,11 +98,11 @@
             }
           ]
          }
-  - length: { error: 4 }
-  - match: { error.tag: "fails" }
-  - match: { error.type: "set" }
-  - match: { error.reason: "[field] required property is missing" }
-  - match: { error.property_name: "field" }
+  - match: { error.root_cause.0.type: "parse_exception" }
+  - match: { error.root_cause.0.reason: "[field] required property is missing" }
+  - match: { error.root_cause.0.header.processor_tag: "fails" }
+  - match: { error.root_cause.0.header.processor_type: "set" }
+  - match: { error.root_cause.0.header.property_name: "field" }

 ---
 "Test simulate without index type and id":
@@ -191,10 +191,9 @@
             }
           ]
          }
-  - length: { error: 4 }
-  - is_false: error.processor_type
-  - is_false: error.processor_tag
-  - match: { error.property_name: "pipeline" }
+  - is_false: error.root_cause.0.header.processor_type
+  - is_false: error.root_cause.0.header.processor_tag
+  - match: { error.root_cause.0.header.property_name: "pipeline" }
   - match: { error.reason: "[pipeline] required property is missing" }

 ---
@@ -225,11 +224,11 @@
             }
           ]
          }
-  - length: { error: 4 }
-  - match: { error.type: "set" }
-  - is_false: error.tag
-  - match: { error.reason: "[value] required property is missing" }
-  - match: { error.property_name: "value" }
+  - match: { error.root_cause.0.type: "parse_exception" }
+  - match: { error.root_cause.0.reason: "[value] required property is missing" }
+  - match: { error.root_cause.0.header.processor_type: "set" }
+  - match: { error.root_cause.0.header.property_name: "value" }
+  - is_false: error.root_cause.0.header.processor_tag

 ---
 "Test simulate with verbose flag":
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/80_dedot_processor.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/80_dedot_processor.yaml
deleted file mode 100644
index bdc64572a45..00000000000
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/80_dedot_processor.yaml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-"Test De-Dot Processor With Provided Separator":
-  - do:
-      ingest.put_pipeline:
-        id: "my_pipeline"
-        body:  >
-          {
-            "description": "_description",
-            "processors": [
-              {
-                "dedot" : {
-                  "separator" : "3"
-                }
-              }
-            ]
-          }
-  - match: { acknowledged: true }
-
-  - do:
-      index:
-        index: test
-        type: test
-        id: 1
-        pipeline: "my_pipeline"
-        body: {"a.b.c": "hello world"}
-
-  - do:
-      get:
-        index: test
-        type: test
-        id: 1
-  - match: { _source.a3b3c: "hello world" }
-
----
-"Test De-Dot Processor With Default Separator":
-  - do:
-      ingest.put_pipeline:
-        id: "my_pipeline"
-        body:  >
-          {
-            "description": "_description",
-            "processors": [
-              {
-                "dedot" : {
-                }
-              }
-            ]
-          }
-  - match: { acknowledged: true }
-
-  - do:
-      index:
-        index: test
-        type: test
-        id: 1
-        pipeline: "my_pipeline"
-        body: {"a.b.c": "hello world"}
-
-  - do:
-      get:
-        index: test
-        type: test
-        id: 1
-  - match: { _source.a_b_c: "hello world" }
diff --git a/settings.gradle b/settings.gradle
index c8616789569..df2ce16c8bc 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -24,7 +24,6 @@ List projects = [
   'plugins:discovery-azure',
   'plugins:discovery-ec2',
   'plugins:discovery-gce',
-  'plugins:discovery-multicast',
   'plugins:ingest-geoip',
   'plugins:lang-javascript',
   'plugins:lang-painless',
@@ -41,8 +40,8 @@ List projects = [
   'qa:smoke-test-client',
   'qa:smoke-test-multinode',
   'qa:smoke-test-plugins',
-  'qa:ingest-with-mustache',
-  'qa:ingest-disabled',
+  'qa:smoke-test-ingest-with-all-dependencies',
+  'qa:smoke-test-ingest-disabled',
   'qa:vagrant',
 ]
diff --git a/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
index ab304c28c54..35c08977ea7 100644
--- a/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
@@ -28,11 +28,8 @@ import org.junit.After;
 import org.junit.Before;

 import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.Writer;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Locale;

 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.greaterThan;
@@ -64,8 +61,6 @@ public abstract class CliToolTestCase extends ESTestCase {
      */
     public static class MockTerminal extends Terminal {

-        private static final PrintWriter DEV_NULL = new PrintWriter(new DevNullWriter());
-
         public MockTerminal() {
             super(Verbosity.NORMAL);
         }
@@ -75,7 +70,7 @@
         }

         @Override
-        protected void doPrint(String msg, Object... args) {
+        protected void doPrint(String msg) {
         }

         @Override
@@ -89,33 +84,11 @@
         }

         @Override
-        public void print(String msg, Object... args) {
+        public void print(String msg) {
         }

         @Override
-        public void printStackTrace(Throwable t) {
-            return;
-        }
-
-        @Override
-        public PrintWriter writer() {
-            return DEV_NULL;
-        }
-
-        private static class DevNullWriter extends Writer {
-
-            @Override
-            public void write(char[] cbuf, int off, int len) throws IOException {
-            }
-
-            @Override
-            public void flush() throws IOException {
-            }
-
-            @Override
-            public void close() throws IOException {
-            }
-        }
+        public void printStackTrace(Throwable t) {}
     }

     /**
@@ -123,7 +96,7 @@
      */
     public static class CaptureOutputTerminal extends MockTerminal {

-        List<String> terminalOutput = new ArrayList<String>();
+        List<String> terminalOutput = new ArrayList<>();

         public CaptureOutputTerminal() {
             super(Verbosity.NORMAL);
@@ -134,13 +107,13 @@
         }

         @Override
-        protected void doPrint(String msg, Object... args) {
-            terminalOutput.add(String.format(Locale.ROOT, msg, args));
+        protected void doPrint(String msg) {
+            terminalOutput.add(msg);
         }

         @Override
-        public void print(String msg, Object... args) {
-            doPrint(msg, args);
+        public void print(String msg) {
+            doPrint(msg);
         }

         @Override
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index cddf4632cf8..39508644361 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -1435,11 +1435,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
         if (!bogusIds.isEmpty()) {
             // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
             for (Tuple<String, String> doc : bogusIds) {
-                // see https://github.com/elasticsearch/elasticsearch/issues/8706
-                final DeleteResponse deleteResponse = client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get();
-                if (deleteResponse.isFound() == false) {
-                    logger.warn("failed to delete a dummy doc [{}][{}]", doc.v1(), doc.v2());
-                }
+                assertTrue("failed to delete a dummy doc [" + doc.v1() + "][" + doc.v2() + "]",
+                    client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get().isFound());
             }
         }
         if (forceRefresh) {
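The Terminal change above is a contract simplification: doPrint no longer does printf-style formatting (which is why the Locale import goes away), so callers format explicitly at the call site and capture-based tests compare messages verbatim. A self-contained sketch of the simplified contract, using stand-in names rather than the real classes:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Locale;

    // Minimal stand-in for the simplified Terminal: doPrint receives a fully
    // formatted message; formatting happens at the call site, not inside.
    abstract class SimpleTerminal {
        protected abstract void doPrint(String msg);

        public void print(String msg) {
            doPrint(msg);
        }
    }

    class CapturingTerminal extends SimpleTerminal {
        final List<String> output = new ArrayList<>();

        @Override
        protected void doPrint(String msg) {
            output.add(msg);
        }
    }

    class Demo {
        public static void main(String[] args) {
            CapturingTerminal t = new CapturingTerminal();
            // the caller formats, with an explicit Locale
            t.print(String.format(Locale.ROOT, "removed plugin [%s]", "fake"));
            System.out.println(t.output); // [removed plugin [fake]]
        }
    }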
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index a3736f691d4..fc713400262 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -185,49 +185,49 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
     /**
      * Returns a client to the single-node cluster.
      */
-    public static Client client() {
+    public Client client() {
         return NODE.client();
     }

     /**
      * Returns the single test nodes name.
      */
-    public static String nodeName() {
+    public String nodeName() {
         return "node_s_0";
     }

     /**
      * Return a reference to the singleton node.
      */
-    protected static Node node() {
+    protected Node node() {
         return NODE;
     }

     /**
      * Get an instance for a particular class using the injector of the singleton node.
      */
-    protected static <T> T getInstanceFromNode(Class<T> clazz) {
+    protected <T> T getInstanceFromNode(Class<T> clazz) {
         return NODE.injector().getInstance(clazz);
     }

     /**
      * Create a new index on the singleton node with empty index settings.
      */
-    protected static IndexService createIndex(String index) {
+    protected IndexService createIndex(String index) {
         return createIndex(index, Settings.EMPTY);
     }

     /**
      * Create a new index on the singleton node with the provided index settings.
      */
-    protected static IndexService createIndex(String index, Settings settings) {
+    protected IndexService createIndex(String index, Settings settings) {
         return createIndex(index, settings, null, (XContentBuilder) null);
     }

     /**
      * Create a new index on the singleton node with the provided index settings.
      */
-    protected static IndexService createIndex(String index, Settings settings, String type, XContentBuilder mappings) {
+    protected IndexService createIndex(String index, Settings settings, String type, XContentBuilder mappings) {
         CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings);
         if (type != null && mappings != null) {
             createIndexRequestBuilder.addMapping(type, mappings);
@@ -238,7 +238,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
     /**
      * Create a new index on the singleton node with the provided index settings.
      */
-    protected static IndexService createIndex(String index, Settings settings, String type, Object... mappings) {
+    protected IndexService createIndex(String index, Settings settings, String type, Object... mappings) {
         CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings);
         if (type != null && mappings != null) {
             createIndexRequestBuilder.addMapping(type, mappings);
@@ -246,7 +246,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
         return createIndex(index, createIndexRequestBuilder);
     }

-    protected static IndexService createIndex(String index, CreateIndexRequestBuilder createIndexRequestBuilder) {
+    protected IndexService createIndex(String index, CreateIndexRequestBuilder createIndexRequestBuilder) {
         assertAcked(createIndexRequestBuilder.get());
         // Wait for the index to be allocated so that cluster state updates don't override
         // changes that would have been done locally
@@ -261,7 +261,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
     /**
      * Create a new search context.
      */
-    protected static SearchContext createSearchContext(IndexService indexService) {
+    protected SearchContext createSearchContext(IndexService indexService) {
         BigArrays bigArrays = indexService.getIndexServices().getBigArrays();
         ThreadPool threadPool = indexService.getIndexServices().getThreadPool();
         PageCacheRecycler pageCacheRecycler = node().injector().getInstance(PageCacheRecycler.class);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index 67cae85f4c3..e80bb93aeb7 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -382,9 +382,18 @@ public abstract class ESTestCase extends LuceneTestCase {
         return generateRandomStringArray(maxArraySize, maxStringSize, allowNull, true);
     }

+    private static String[] TIME_SUFFIXES = new String[]{"d", "H", "ms", "s", "S", "w"};
+
+    private static String randomTimeValue(int lower, int upper) {
+        return randomIntBetween(lower, upper) + randomFrom(TIME_SUFFIXES);
+    }
+
     public static String randomTimeValue() {
-        final String[] values = new String[]{"d", "H", "ms", "s", "S", "w"};
-        return randomIntBetween(0, 1000) + randomFrom(values);
+        return randomTimeValue(0, 1000);
+    }
+
+    public static String randomPositiveTimeValue() {
+        return randomTimeValue(1, 1000);
     }

     /**
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 658264864e0..d9f634d503e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -1036,7 +1036,7 @@ public final class InternalTestCluster extends TestCluster {
                 IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
                 for (IndexService indexService : indexServices) {
                     for (IndexShard indexShard : indexService) {
-                        assertThat("index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " not 0", indexShard.getOperationsCount(), equalTo(0));
+                        assertThat("index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " not 0", indexShard.getActiveOperationsCount(), equalTo(0));
                     }
                 }
             }
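The new randomPositiveTimeValue helper appears to exist for callers that need a duration setting which rejects zero: it draws from the same suffix set but starts the range at 1. A standalone equivalent, substituting java.util.Random for the test framework's seeded randomness (an assumption for illustration; the real helpers stay reproducible under the test seed):

    import java.util.Random;

    class RandomTimeValues {
        private static final String[] TIME_SUFFIXES = {"d", "H", "ms", "s", "S", "w"};
        private static final Random RANDOM = new Random();

        // Mirrors randomIntBetween(lower, upper) + randomFrom(TIME_SUFFIXES),
        // with both bounds inclusive.
        static String randomTimeValue(int lower, int upper) {
            int n = lower + RANDOM.nextInt(upper - lower + 1);
            return n + TIME_SUFFIXES[RANDOM.nextInt(TIME_SUFFIXES.length)];
        }

        public static void main(String[] args) {
            System.out.println(randomTimeValue(0, 1000)); // e.g. "742ms"
            System.out.println(randomTimeValue(1, 1000)); // never a zero duration
        }
    }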