diff --git a/core/pom.xml b/core/pom.xml index 3186718248f..c9f8656eacb 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -314,6 +314,7 @@ org/elasticsearch/common/util/MockBigArrays.class org/elasticsearch/common/util/MockBigArrays$*.class org/elasticsearch/node/NodeMocksPlugin.class + org/elasticsearch/node/MockNode.class diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index e8243bf5d50..c219d85f9d5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.stats; import com.carrotsearch.hppc.ObjectObjectHashMap; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -67,10 +66,10 @@ public class ClusterStatsIndices implements ToXContent, Streamable { for (ClusterStatsNodeResponse r : nodeResponses) { for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { - ShardStats indexShardStats = countsPerIndex.get(shardStats.getIndex()); + ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndex()); if (indexShardStats == null) { indexShardStats = new ShardStats(); - countsPerIndex.put(shardStats.getIndex(), indexShardStats); + countsPerIndex.put(shardStats.getShardRouting().getIndex(), indexShardStats); } indexShardStats.total++; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 26e78264534..5ed40c5db0d 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.ActionFilters; @@ -106,7 +107,7 @@ public class TransportClusterStatsAction extends TransportNodesAction { +public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; private final IndicesRequestCache indicesRequestCache; @@ -58,48 +57,33 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction shardFailures = null; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - successfulShards++; - } - } - return new ClearIndicesCacheResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected EmptyResult readShardResult(StreamInput in) throws IOException { + return EmptyResult.readEmptyResultFrom(in); } @Override - protected ShardClearIndicesCacheRequest newShardRequest(int numShards, ShardRouting shard, 
ClearIndicesCacheRequest request) { - return new ShardClearIndicesCacheRequest(shard.shardId(), request); + protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures); } @Override - protected ShardClearIndicesCacheResponse newShardResponse() { - return new ShardClearIndicesCacheResponse(); + protected ClearIndicesCacheRequest readRequestFrom(StreamInput in) throws IOException { + final ClearIndicesCacheRequest request = new ClearIndicesCacheRequest(); + request.readFrom(in); + return request; } @Override - protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) { - IndexService service = indicesService.indexService(request.shardId().getIndex()); + protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) { + IndexService service = indicesService.indexService(shardRouting.getIndex()); if (service != null) { - IndexShard shard = service.shard(request.shardId().id()); + IndexShard shard = service.shard(shardRouting.id()); boolean clearedAtLeastOne = false; if (request.queryCache()) { clearedAtLeastOne = true; @@ -137,15 +121,15 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction { +public class TransportOptimizeAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @@ -54,55 +52,40 @@ public class TransportOptimizeAction extends TransportBroadcastAction shardFailures = null; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // a non active shard, ignore... 
- } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - successfulShards++; - } - } - return new OptimizeResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected EmptyResult readShardResult(StreamInput in) throws IOException { + return EmptyResult.readEmptyResultFrom(in); } @Override - protected ShardOptimizeRequest newShardRequest(int numShards, ShardRouting shard, OptimizeRequest request) { - return new ShardOptimizeRequest(shard.shardId(), request); + protected OptimizeResponse newResponse(OptimizeRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + return new OptimizeResponse(totalShards, successfulShards, failedShards, shardFailures); } @Override - protected ShardOptimizeResponse newShardResponse() { - return new ShardOptimizeResponse(); + protected OptimizeRequest readRequestFrom(StreamInput in) throws IOException { + final OptimizeRequest request = new OptimizeRequest(); + request.readFrom(in); + return request; } @Override - protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); - indexShard.optimize(request.optimizeRequest()); - return new ShardOptimizeResponse(request.shardId()); + protected EmptyResult shardOperation(OptimizeRequest request, ShardRouting shardRouting) { + IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).shardSafe(shardRouting.shardId().id()); + indexShard.optimize(request); + return EmptyResult.INSTANCE; } /** * The refresh request works against *all* shards. 
*/ @Override - protected GroupShardsIterator shards(ClusterState clusterState, OptimizeRequest request, String[] concreteIndices) { - return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true); + protected ShardsIterator shards(ClusterState clusterState, OptimizeRequest request, String[] concreteIndices) { + return clusterState.routingTable().allShards(concreteIndices); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java index fea33688c14..0e0881d1729 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.indices.recovery.RecoveryState; import java.io.IOException; import java.util.ArrayList; @@ -38,7 +39,7 @@ import java.util.Map; public class RecoveryResponse extends BroadcastResponse implements ToXContent { private boolean detailed = false; - private Map> shardResponses = new HashMap<>(); + private Map> shardRecoveryStates = new HashMap<>(); public RecoveryResponse() { } @@ -50,18 +51,18 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent { * @param successfulShards Count of shards successfully processed * @param failedShards Count of shards which failed to process * @param detailed Display detailed metrics - * @param shardResponses Map of indices to shard recovery information + * @param shardRecoveryStates Map of indices to shard recovery information * @param shardFailures List of failures processing shards */ public RecoveryResponse(int totalShards, int 
successfulShards, int failedShards, boolean detailed, - Map> shardResponses, List shardFailures) { + Map> shardRecoveryStates, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); - this.shardResponses = shardResponses; + this.shardRecoveryStates = shardRecoveryStates; this.detailed = detailed; } public boolean hasRecoveries() { - return shardResponses.size() > 0; + return shardRecoveryStates.size() > 0; } public boolean detailed() { @@ -72,23 +73,23 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent { this.detailed = detailed; } - public Map> shardResponses() { - return shardResponses; + public Map> shardRecoveryStates() { + return shardRecoveryStates; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (hasRecoveries()) { - for (String index : shardResponses.keySet()) { - List responses = shardResponses.get(index); - if (responses == null || responses.size() == 0) { + for (String index : shardRecoveryStates.keySet()) { + List recoveryStates = shardRecoveryStates.get(index); + if (recoveryStates == null || recoveryStates.size() == 0) { continue; } builder.startObject(index); builder.startArray("shards"); - for (ShardRecoveryResponse recoveryResponse : responses) { + for (RecoveryState recoveryState : recoveryStates) { builder.startObject(); - recoveryResponse.toXContent(builder, params); + recoveryState.toXContent(builder, params); builder.endObject(); } builder.endArray(); @@ -101,12 +102,12 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVInt(shardResponses.size()); - for (Map.Entry> entry : shardResponses.entrySet()) { + out.writeVInt(shardRecoveryStates.size()); + for (Map.Entry> entry : shardRecoveryStates.entrySet()) { out.writeString(entry.getKey()); out.writeVInt(entry.getValue().size()); - for 
(ShardRecoveryResponse recoveryResponse : entry.getValue()) { - recoveryResponse.writeTo(out); + for (RecoveryState recoveryState : entry.getValue()) { + recoveryState.writeTo(out); } } } @@ -118,11 +119,11 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent { for (int i = 0; i < size; i++) { String s = in.readString(); int listSize = in.readVInt(); - List list = new ArrayList<>(listSize); + List list = new ArrayList<>(listSize); for (int j = 0; j < listSize; j++) { - list.add(ShardRecoveryResponse.readShardRecoveryResponse(in)); + list.add(RecoveryState.readRecoveryState(in)); } - shardResponses.put(s, list); + shardRecoveryStates.put(s, list); } } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/ShardRecoveryResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/ShardRecoveryResponse.java deleted file mode 100644 index a4104fbc449..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/ShardRecoveryResponse.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.recovery; - -import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.recovery.RecoveryState; - -import java.io.IOException; - -/** - * Information regarding the recovery state of a shard. - */ -public class ShardRecoveryResponse extends BroadcastShardResponse implements ToXContent { - - RecoveryState recoveryState; - - public ShardRecoveryResponse() { } - - /** - * Constructs shard recovery information for the given index and shard id. - * - * @param shardId Id of the shard - */ - ShardRecoveryResponse(ShardId shardId) { - super(shardId); - } - - /** - * Sets the recovery state information for the shard. - * - * @param recoveryState Recovery state - */ - public void recoveryState(RecoveryState recoveryState) { - this.recoveryState = recoveryState; - } - - /** - * Gets the recovery state information for the shard. Null if shard wasn't recovered / recovery didn't start yet. - * - * @return Recovery state - */ - @Nullable - public RecoveryState recoveryState() { - return recoveryState; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - recoveryState.toXContent(builder, params); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - recoveryState.writeTo(out); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryState = RecoveryState.readRecoveryState(in); - } - - /** - * Builds a new ShardRecoveryResponse from the give input stream. 
- * - * @param in Input stream - * @return A new ShardRecoveryResponse - * @throws IOException - */ - public static ShardRecoveryResponse readShardRecoveryResponse(StreamInput in) throws IOException { - ShardRecoveryResponse response = new ShardRecoveryResponse(); - response.readFrom(in); - return response; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index cee59c7eb2a..af00f8b7895 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -19,40 +19,37 @@ package org.elasticsearch.action.admin.indices.recovery; +import com.google.common.collect.Maps; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * Transport action for shard recovery operation. This transport action does not actually * perform shard recovery, it only reports on recoveries (both active and complete). */ -public class TransportRecoveryAction extends TransportBroadcastAction { +public class TransportRecoveryAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @@ -61,84 +58,55 @@ public class TransportRecoveryAction extends TransportBroadcastAction shardFailures = null; - Map> shardResponses = new HashMap<>(); - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); + @Override + protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + Map> shardResponses = Maps.newHashMap(); + for (RecoveryState recoveryState : responses) { + if (recoveryState == null) { + continue; + } + String indexName = recoveryState.getShardId().getIndex(); + if (!shardResponses.containsKey(indexName)) { + shardResponses.put(indexName, new ArrayList()); + } + if (request.activeOnly()) { + if 
(recoveryState.getStage() != RecoveryState.Stage.DONE) { + shardResponses.get(indexName).add(recoveryState); } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); } else { - ShardRecoveryResponse recoveryResponse = (ShardRecoveryResponse) shardResponse; - successfulShards++; - - if (recoveryResponse.recoveryState() == null) { - // recovery not yet started - continue; - } - - String indexName = recoveryResponse.getIndex(); - List responses = shardResponses.get(indexName); - - if (responses == null) { - responses = new ArrayList<>(); - shardResponses.put(indexName, responses); - } - - if (request.activeOnly()) { - if (recoveryResponse.recoveryState().getStage() != RecoveryState.Stage.DONE) { - responses.add(recoveryResponse); - } - } else { - responses.add(recoveryResponse); - } + shardResponses.get(indexName).add(recoveryState); } } - - return new RecoveryResponse(shardsResponses.length(), successfulShards, - failedShards, request.detailed(), shardResponses, shardFailures); + return new RecoveryResponse(totalShards, successfulShards, failedShards, request.detailed(), shardResponses, shardFailures); } @Override - protected ShardRecoveryRequest newShardRequest(int numShards, ShardRouting shard, RecoveryRequest request) { - return new ShardRecoveryRequest(shard.shardId(), request); + protected RecoveryRequest readRequestFrom(StreamInput in) throws IOException { + final RecoveryRequest recoveryRequest = new RecoveryRequest(); + recoveryRequest.readFrom(in); + return recoveryRequest; } @Override - protected ShardRecoveryResponse newShardResponse() { - return new ShardRecoveryResponse(); + protected RecoveryState shardOperation(RecoveryRequest request, ShardRouting shardRouting) { + IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); + IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id()); + return indexShard.recoveryState(); } @Override - 
protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) { - - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.shardSafe(request.shardId().id()); - ShardRecoveryResponse shardRecoveryResponse = new ShardRecoveryResponse(request.shardId()); - - RecoveryState state = indexShard.recoveryState(); - shardRecoveryResponse.recoveryState(state); - return shardRecoveryResponse; - } - - @Override - protected GroupShardsIterator shards(ClusterState state, RecoveryRequest request, String[] concreteIndices) { - return state.routingTable().allAssignedShardsGrouped(concreteIndices, true, true); + protected ShardsIterator shards(ClusterState state, RecoveryRequest request, String[] concreteIndices) { + return state.routingTable().allShardsIncludingRelocationTargets(concreteIndices); } @Override @@ -150,14 +118,4 @@ public class TransportRecoveryAction extends TransportBroadcastAction shardFailures) { + IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; } @@ -63,7 +62,7 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont Set indices = Sets.newHashSet(); for (ShardSegments shard : shards) { - indices.add(shard.getIndex()); + indices.add(shard.getShardRouting().getIndex()); } for (String index : indices) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java index 6e754a26210..4b3264fca40 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java @@ -20,10 +20,10 @@ package org.elasticsearch.action.admin.indices.segments; import 
com.google.common.collect.ImmutableList; -import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.index.engine.Segment; import java.io.IOException; @@ -33,7 +33,7 @@ import java.util.List; import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; -public class ShardSegments extends BroadcastShardResponse implements Iterable { +public class ShardSegments implements Streamable, Iterable { private ShardRouting shardRouting; @@ -43,7 +43,6 @@ public class ShardSegments extends BroadcastShardResponse implements Iterable segments) { - super(shardRouting.shardId()); this.shardRouting = shardRouting; this.segments = segments; } @@ -89,7 +88,6 @@ public class ShardSegments extends BroadcastShardResponse implements Iterable { +public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @@ -59,7 +52,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastAction shardFailures = null; - final List shards = new ArrayList<>(); - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - shards.add((ShardSegments) shardResponse); - successfulShards++; - } - } - return new IndicesSegmentResponse(shards.toArray(new ShardSegments[shards.size()]), clusterState, shardsResponses.length(), successfulShards, 
failedShards, shardFailures); + protected ShardSegments readShardResult(StreamInput in) throws IOException { + return ShardSegments.readShardSegments(in); } @Override - protected IndexShardSegmentRequest newShardRequest(int numShards, ShardRouting shard, IndicesSegmentsRequest request) { - return new IndexShardSegmentRequest(shard.shardId(), request); + protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List results, List shardFailures, ClusterState clusterState) { + return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures); } @Override - protected ShardSegments newShardResponse() { - return new ShardSegments(); + protected IndicesSegmentsRequest readRequestFrom(StreamInput in) throws IOException { + final IndicesSegmentsRequest request = new IndicesSegmentsRequest(); + request.readFrom(in); + return request; } @Override - protected ShardSegments shardOperation(IndexShardSegmentRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.shardSafe(request.shardId().id()); - return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose)); - } - - static class IndexShardSegmentRequest extends BroadcastShardRequest { - boolean verbose; - - IndexShardSegmentRequest() { - verbose = false; - } - - IndexShardSegmentRequest(ShardId shardId, IndicesSegmentsRequest request) { - super(shardId, request); - verbose = request.verbose(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(verbose); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - verbose = in.readBoolean(); - } + protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting 
shardRouting) { + IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardRouting.id()); + return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose())); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index d9b8e9da77d..885dddeea6a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -24,7 +24,6 @@ import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -51,7 +50,7 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten } - IndicesStatsResponse(ShardStats[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List shardFailures) { + IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; } @@ -90,7 +89,7 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten Set indices = Sets.newHashSet(); for (ShardStats shard : shards) { - indices.add(shard.getIndex()); + indices.add(shard.getShardRouting().getIndex()); } for (String index : indices) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java 
b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index b3c87de3dd1..8fea8c795eb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -19,11 +19,11 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; @@ -37,7 +37,7 @@ import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEnt /** */ -public class ShardStats extends BroadcastShardResponse implements ToXContent { +public class ShardStats implements Streamable, ToXContent { private ShardRouting shardRouting; private CommonStats commonStats; @Nullable @@ -49,14 +49,13 @@ public class ShardStats extends BroadcastShardResponse implements ToXContent { ShardStats() { } - public ShardStats(IndexShard indexShard, CommonStatsFlags flags) { - super(indexShard.shardId()); - this.shardRouting = indexShard.routingEntry(); - this.dataPath = indexShard.shardPath().getRootDataPath().toString(); - this.statePath = indexShard.shardPath().getRootStatePath().toString(); - this.isCustomDataPath = indexShard.shardPath().isCustomDataPath(); - this.commonStats = new CommonStats(indexShard, flags); - this.commitStats = indexShard.commitStats(); + public ShardStats(ShardRouting routing, ShardPath shardPath, CommonStats commonStats, CommitStats commitStats) { + this.shardRouting = routing; + this.dataPath = shardPath.getRootDataPath().toString(); + 
this.statePath = shardPath.getRootStatePath().toString(); + this.isCustomDataPath = shardPath.isCustomDataPath(); + this.commitStats = commitStats; + this.commonStats = commonStats; } /** @@ -94,7 +93,6 @@ public class ShardStats extends BroadcastShardResponse implements ToXContent { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); shardRouting = readShardRoutingEntry(in); commonStats = CommonStats.readCommonStats(in); commitStats = CommitStats.readOptionalCommitStatsFrom(in); @@ -105,7 +103,6 @@ public class ShardStats extends BroadcastShardResponse implements ToXContent { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); shardRouting.writeTo(out); commonStats.writeTo(out); out.writeOptionalStreamable(commitStats); @@ -146,5 +143,4 @@ public class ShardStats extends BroadcastShardResponse implements ToXContent { static final XContentBuilderString NODE = new XContentBuilderString("node"); static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node"); } - } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 9ce5291ba66..0f0cc1afa14 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -21,37 +21,30 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import 
org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** */ -public class TransportIndicesStatsAction extends TransportBroadcastAction { +public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @@ -60,7 +53,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction shardFailures = null; - final List shards = new ArrayList<>(); - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - 
shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - shards.add((ShardStats) shardResponse); - successfulShards++; - } - } - return new IndicesStatsResponse(shards.toArray(new ShardStats[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected ShardStats readShardResult(StreamInput in) throws IOException { + return ShardStats.readShardStats(in); } @Override - protected IndexShardStatsRequest newShardRequest(int numShards, ShardRouting shard, IndicesStatsRequest request) { - return new IndexShardStatsRequest(shard.shardId(), request); + protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures); } @Override - protected ShardStats newShardResponse() { - return new ShardStats(); + protected IndicesStatsRequest readRequestFrom(StreamInput in) throws IOException { + IndicesStatsRequest request = new IndicesStatsRequest(); + request.readFrom(in); + return request; } @Override - protected ShardStats shardOperation(IndexShardStatsRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.shardSafe(request.shardId().id()); + protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting shardRouting) { + IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); + IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id()); // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet if (indexShard.routingEntry() == 
null) { throw new ShardNotFoundException(indexShard.shardId()); @@ -128,92 +103,65 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction { +public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @@ -58,7 +54,7 @@ public class TransportUpgradeStatusAction extends TransportBroadcastAction shardFailures = null; - final List shards = new ArrayList<>(); - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - shards.add((ShardUpgradeStatus) shardResponse); - successfulShards++; - } - } - return new UpgradeStatusResponse(shards.toArray(new ShardUpgradeStatus[shards.size()]), shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected ShardUpgradeStatus readShardResult(StreamInput in) throws IOException { + return ShardUpgradeStatus.readShardUpgradeStatus(in); } @Override - protected IndexShardUpgradeStatusRequest newShardRequest(int numShards, ShardRouting shard, UpgradeStatusRequest request) { - return new IndexShardUpgradeStatusRequest(shard.shardId(), request); + protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures); } @Override - protected ShardUpgradeStatus newShardResponse() { - return new ShardUpgradeStatus(); + protected UpgradeStatusRequest 
readRequestFrom(StreamInput in) throws IOException { + UpgradeStatusRequest request = new UpgradeStatusRequest(); + request.readFrom(in); + return request; } @Override - protected ShardUpgradeStatus shardOperation(IndexShardUpgradeStatusRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.shardSafe(request.shardId().id()); + protected ShardUpgradeStatus shardOperation(UpgradeStatusRequest request, ShardRouting shardRouting) { + IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); + IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id()); List segments = indexShard.engine().segments(false); long total_bytes = 0; long to_upgrade_bytes = 0; @@ -136,16 +115,4 @@ public class TransportUpgradeStatusAction extends TransportBroadcastAction { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 16e24ee66ae..82683625df8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -36,14 +36,11 @@ import java.util.Map; import java.util.Set; public class UpgradeStatusResponse extends BroadcastResponse implements ToXContent { - - private ShardUpgradeStatus[] shards; private Map indicesUpgradeStatus; UpgradeStatusResponse() { - } UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { @@ -75,7 +72,6 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte return indicesUpgradeStats; } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -120,8 +116,6 @@ public class 
UpgradeStatusResponse extends BroadcastResponse implements ToXConte @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes()); builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes()); builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient()); @@ -163,10 +157,8 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte } builder.endObject(); } - builder.endObject(); } - builder.endObject(); } return builder; @@ -186,6 +178,5 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT = new XContentBuilderString("size_to_upgrade_ancient"); static final XContentBuilderString SIZE_TO_UPGRADE_IN_BYTES = new XContentBuilderString("size_to_upgrade_in_bytes"); static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT_IN_BYTES = new XContentBuilderString("size_to_upgrade_ancient_in_bytes"); - } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java similarity index 83% rename from core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResponse.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java index d3942038164..46c51757159 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java @@ -20,9 +20,9 @@ package org.elasticsearch.action.admin.indices.upgrade.post; import org.elasticsearch.Version; -import 
org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -31,7 +31,9 @@ import java.text.ParseException; /** * */ -class ShardUpgradeResponse extends BroadcastShardResponse { +class ShardUpgradeResult implements Streamable { + + private ShardId shardId; private org.apache.lucene.util.Version oldestLuceneSegment; @@ -40,16 +42,20 @@ class ShardUpgradeResponse extends BroadcastShardResponse { private boolean primary; - ShardUpgradeResponse() { + ShardUpgradeResult() { } - ShardUpgradeResponse(ShardId shardId, boolean primary, Version upgradeVersion, org.apache.lucene.util.Version oldestLuceneSegment) { - super(shardId); + ShardUpgradeResult(ShardId shardId, boolean primary, Version upgradeVersion, org.apache.lucene.util.Version oldestLuceneSegment) { + this.shardId = shardId; this.primary = primary; this.upgradeVersion = upgradeVersion; this.oldestLuceneSegment = oldestLuceneSegment; } + public ShardId getShardId() { + return shardId; + } + public org.apache.lucene.util.Version oldestLuceneSegment() { return this.oldestLuceneSegment; } @@ -65,7 +71,7 @@ class ShardUpgradeResponse extends BroadcastShardResponse { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + shardId = ShardId.readShardId(in); primary = in.readBoolean(); upgradeVersion = Version.readVersion(in); try { @@ -78,10 +84,9 @@ class ShardUpgradeResponse extends BroadcastShardResponse { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); + shardId.writeTo(out); out.writeBoolean(primary); Version.writeVersion(upgradeVersion, out); out.writeString(oldestLuceneSegment.toString()); } - } \ No newline at end of file diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index 23d40a55cf0..11bc190aef0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -24,32 +24,30 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.PrimaryMissingActionException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.atomic.AtomicReferenceArray; import static com.google.common.collect.Maps.newHashMap; import static com.google.common.collect.Sets.newHashSet; @@ -57,7 +55,7 @@ import static com.google.common.collect.Sets.newHashSet; /** * Upgrade index/indices action. */ -public class TransportUpgradeAction extends TransportBroadcastAction { +public class TransportUpgradeAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @@ -67,56 +65,40 @@ public class TransportUpgradeAction extends TransportBroadcastAction shardFailures = null; + protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List shardUpgradeResults, List shardFailures, ClusterState clusterState) { Map successfulPrimaryShards = newHashMap(); Map> versions = newHashMap(); - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // a non active shard, ignore... - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); + for (ShardUpgradeResult result : shardUpgradeResults) { + successfulShards++; + String index = result.getShardId().getIndex(); + if (result.primary()) { + Integer count = successfulPrimaryShards.get(index); + successfulPrimaryShards.put(index, count == null ? 
1 : count + 1); + } + Tuple versionTuple = versions.get(index); + if (versionTuple == null) { + versions.put(index, new Tuple<>(result.upgradeVersion(), result.oldestLuceneSegment())); } else { - successfulShards++; - ShardUpgradeResponse shardUpgradeResponse = (ShardUpgradeResponse) shardResponse; - String index = shardUpgradeResponse.getIndex(); - if (shardUpgradeResponse.primary()) { - Integer count = successfulPrimaryShards.get(index); - successfulPrimaryShards.put(index, count == null ? 1 : count + 1); + // We already have versions for this index - let's see if we need to update them based on the current shard + Version version = versionTuple.v1(); + org.apache.lucene.util.Version luceneVersion = versionTuple.v2(); + // For the metadata we are interested in the _latest_ Elasticsearch version that was processing the metadata + // Since we rewrite the mapping during upgrade the metadata is always rewritten by the latest version + if (result.upgradeVersion().after(versionTuple.v1())) { + version = result.upgradeVersion(); } - Tuple versionTuple = versions.get(index); - if (versionTuple == null) { - versions.put(index, new Tuple<>(shardUpgradeResponse.upgradeVersion(), shardUpgradeResponse.oldestLuceneSegment())); - } else { - // We already have versions for this index - let's see if we need to update them based on the current shard - Version version = versionTuple.v1(); - org.apache.lucene.util.Version luceneVersion = versionTuple.v2(); - // For the metadata we are interested in the _latest_ elasticsearch version that was processing the metadata - // Since we rewrite the mapping during upgrade the metadata is always rewritten by the latest version - if (shardUpgradeResponse.upgradeVersion().after(versionTuple.v1())) { - version = shardUpgradeResponse.upgradeVersion(); - } - // For the lucene version we are interested in the _oldest_ lucene version since it determines the - // oldest version that we need to support - if 
(shardUpgradeResponse.oldestLuceneSegment().onOrAfter(versionTuple.v2()) == false) { - luceneVersion = shardUpgradeResponse.oldestLuceneSegment(); - } - versions.put(index, new Tuple<>(version, luceneVersion)); + // For the lucene version we are interested in the _oldest_ lucene version since it determines the + // oldest version that we need to support + if (result.oldestLuceneSegment().onOrAfter(versionTuple.v2()) == false) { + luceneVersion = result.oldestLuceneSegment(); } + versions.put(index, new Tuple<>(version, luceneVersion)); } } Map> updatedVersions = newHashMap(); @@ -133,33 +115,37 @@ public class TransportUpgradeAction extends TransportBroadcastAction indicesWithMissingPrimaries = indicesWithMissingPrimaries(clusterState, concreteIndices); if (indicesWithMissingPrimaries.isEmpty()) { return iterator; @@ -231,5 +217,4 @@ public class TransportUpgradeAction extends TransportBroadcastAction the underlying client request + * @param the response to the client request + * @param per-shard operation results + */ +public abstract class TransportBroadcastByNodeAction extends HandledTransportAction { + + private final ClusterService clusterService; + private final TransportService transportService; + + final String transportNodeBroadcastAction; + + public TransportBroadcastByNodeAction( + Settings settings, + String actionName, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + Class request, + String executor) { + super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); + + this.clusterService = clusterService; + this.transportService = transportService; + + transportNodeBroadcastAction = actionName + "[n]"; + + transportService.registerRequestHandler(transportNodeBroadcastAction, new Callable() { + @Override + public NodeRequest call() throws Exception { + return new 
NodeRequest(); + } + }, executor, new BroadcastByNodeTransportRequestHandler()); + } + + private final Response newResponse( + Request request, + AtomicReferenceArray responses, + List unavailableShardExceptions, + Map> nodes, + ClusterState clusterState) { + int totalShards = 0; + int successfulShards = 0; + List broadcastByNodeResponses = new ArrayList<>(); + List exceptions = new ArrayList<>(); + for (int i = 0; i < responses.length(); i++) { + if (responses.get(i) instanceof FailedNodeException) { + FailedNodeException exception = (FailedNodeException) responses.get(i); + totalShards += nodes.get(exception.nodeId()).size(); + for (ShardRouting shard : nodes.get(exception.nodeId())) { + exceptions.add(new DefaultShardOperationFailedException(shard.getIndex(), shard.getId(), exception)); + } + } else { + NodeResponse response = (NodeResponse) responses.get(i); + broadcastByNodeResponses.addAll(response.results); + totalShards += response.getTotalShards(); + successfulShards += response.getSuccessfulShards(); + for (BroadcastShardOperationFailedException throwable : response.getExceptions()) { + if (!TransportActions.isShardNotAvailableException(throwable)) { + exceptions.add(new DefaultShardOperationFailedException(throwable.getIndex(), throwable.getShardId().getId(), throwable)); + } + } + } + } + totalShards += unavailableShardExceptions.size(); + int failedShards = exceptions.size(); + return newResponse(request, totalShards, successfulShards, failedShards, broadcastByNodeResponses, exceptions, clusterState); + } + + /** + * Deserialize a shard-level result from an input stream + * + * @param in input stream + * @return a deserialized shard-level result + * @throws IOException + */ + protected abstract ShardOperationResult readShardResult(StreamInput in) throws IOException; + + /** + * Creates a new response to the underlying request. 
+ * + * @param request the underlying request + * @param totalShards the total number of shards considered for execution of the operation + * @param successfulShards the total number of shards for which execution of the operation was successful + * @param failedShards the total number of shards for which execution of the operation failed + * @param results the per-node aggregated shard-level results + * @param shardFailures the exceptions corresponding to shard operation failures + * @param clusterState the cluster state + * @return the response + */ + protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List results, List shardFailures, ClusterState clusterState); + + /** + * Deserialize a request from an input stream + * + * @param in input stream + * @return a de-serialized request + * @throws IOException + */ + protected abstract Request readRequestFrom(StreamInput in) throws IOException; + + /** + * Executes the shard-level operation. This method is called once per shard serially on the receiving node. + * + * @param request the node-level request + * @param shardRouting the shard on which to execute the operation + * @return the result of the shard-level operation for the shard + */ + protected abstract ShardOperationResult shardOperation(Request request, ShardRouting shardRouting); + + /** + * Determines the shards on which this operation will be executed. The operation is executed once per shard. + * + * @param clusterState the cluster state + * @param request the underlying request + * @param concreteIndices the concrete indices on which to execute the operation + * @return the shards on which to execute the operation + */ + protected abstract ShardsIterator shards(ClusterState clusterState, Request request, String[] concreteIndices); + + /** + * Executes a global block check before polling the cluster state. 
+ * + * @param state the cluster state + * @param request the underlying request + * @return a non-null exception if the operation is blocked + */ + protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request); + + /** + * Executes a global request-level check before polling the cluster state. + * + * @param state the cluster state + * @param request the underlying request + * @param concreteIndices the concrete indices on which to execute the operation + * @return a non-null exception if the operation is blocked + */ + protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices); + + @Override + protected void doExecute(Request request, ActionListener listener) { + new AsyncAction(request, listener).start(); + } + + protected class AsyncAction { + private final Request request; + private final ActionListener listener; + private final ClusterState clusterState; + private final DiscoveryNodes nodes; + private final Map> nodeIds; + private final AtomicReferenceArray responses; + private final AtomicInteger counter = new AtomicInteger(); + private List unavailableShardExceptions = new ArrayList<>(); + + protected AsyncAction(Request request, ActionListener listener) { + this.request = request; + this.listener = listener; + + clusterState = clusterService.state(); + nodes = clusterState.nodes(); + + ClusterBlockException globalBlockException = checkGlobalBlock(clusterState, request); + if (globalBlockException != null) { + throw globalBlockException; + } + + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + ClusterBlockException requestBlockException = checkRequestBlock(clusterState, request, concreteIndices); + if (requestBlockException != null) { + throw requestBlockException; + } + + logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version()); + ShardsIterator shardIt = 
shards(clusterState, request, concreteIndices); + nodeIds = Maps.newHashMap(); + + for (ShardRouting shard : shardIt.asUnordered()) { + if (shard.assignedToNode()) { + String nodeId = shard.currentNodeId(); + if (!nodeIds.containsKey(nodeId)) { + nodeIds.put(nodeId, new ArrayList()); + } + nodeIds.get(nodeId).add(shard); + } else { + unavailableShardExceptions.add( + new NoShardAvailableActionException( + shard.shardId(), + " no shards available for shard " + shard.toString() + " while executing " + actionName + ) + ); + } + } + + responses = new AtomicReferenceArray<>(nodeIds.size()); + } + + public void start() { + if (nodeIds.size() == 0) { + try { + onCompletion(); + } catch (Throwable e) { + listener.onFailure(e); + } + } else { + int nodeIndex = -1; + for (Map.Entry> entry : nodeIds.entrySet()) { + nodeIndex++; + DiscoveryNode node = nodes.get(entry.getKey()); + sendNodeRequest(node, entry.getValue(), nodeIndex); + } + } + } + + private void sendNodeRequest(final DiscoveryNode node, List shards, final int nodeIndex) { + try { + NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards); + transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler() { + @Override + public NodeResponse newInstance() { + return new NodeResponse(); + } + + @Override + public void handleResponse(NodeResponse response) { + onNodeResponse(node, nodeIndex, response); + } + + @Override + public void handleException(TransportException exp) { + onNodeFailure(node, nodeIndex, exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + } catch (Throwable e) { + onNodeFailure(node, nodeIndex, e); + } + } + + protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) { + logger.trace("received response for [{}] from node [{}]", actionName, node.id()); + + // this is defensive to protect against the possibility of double invocation + // the current 
implementation of TransportService#sendRequest guards against this + // but concurrency is hard, safety is important, and the small performance loss here does not matter + if (responses.compareAndSet(nodeIndex, null, response)) { + if (counter.incrementAndGet() == responses.length()) { + onCompletion(); + } + } + } + + protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) { + String nodeId = node.id(); + if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { + logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId); + } + + // this is defensive to protect against the possibility of double invocation + // the current implementation of TransportService#sendRequest guards against this + // but concurrency is hard, safety is important, and the small performance loss here does not matter + if (responses.compareAndSet(nodeIndex, null, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t))) { + if (counter.incrementAndGet() == responses.length()) { + onCompletion(); + } + } + } + + protected void onCompletion() { + Response response = null; + try { + response = newResponse(request, responses, unavailableShardExceptions, nodeIds, clusterState); + } catch (Throwable t) { + logger.debug("failed to combine responses from nodes", t); + listener.onFailure(t); + } + if (response != null) { + try { + listener.onResponse(response); + } catch (Throwable t) { + listener.onFailure(t); + } + } + } + } + + class BroadcastByNodeTransportRequestHandler implements TransportRequestHandler { + @Override + public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception { + List shards = request.getShards(); + final int totalShards = shards.size(); + logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards); + final Object[] shardResultOrExceptions = new Object[totalShards]; + + int shardIndex = -1; + for (final ShardRouting shardRouting : shards) { + 
shardIndex++; + onShardOperation(request, shardResultOrExceptions, shardIndex, shardRouting); + } + + List accumulatedExceptions = new ArrayList<>(); + List results = new ArrayList<>(); + for (int i = 0; i < totalShards; i++) { + if (shardResultOrExceptions[i] instanceof BroadcastShardOperationFailedException) { + accumulatedExceptions.add((BroadcastShardOperationFailedException) shardResultOrExceptions[i]); + } else { + results.add((ShardOperationResult) shardResultOrExceptions[i]); + } + } + + channel.sendResponse(new NodeResponse(request.getNodeId(), totalShards, results, accumulatedExceptions)); + } + + private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) { + try { + logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary()); + ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting); + shardResults[shardIndex] = result; + logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary()); + } catch (Throwable t) { + BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t); + e.setIndex(shardRouting.getIndex()); + e.setShard(shardRouting.shardId()); + shardResults[shardIndex] = e; + logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary()); + } + } + } + + protected class NodeRequest extends TransportRequest implements IndicesRequest { + private String nodeId; + + private List shards; + + protected Request indicesLevelRequest; + + protected NodeRequest() { + } + + public NodeRequest(String nodeId, Request request, List shards) { + super(request); + this.indicesLevelRequest = request; + this.shards = shards; + this.nodeId = nodeId; + } + + public List getShards() { + return shards; + } + + public String getNodeId() { + return nodeId; + 
} + + public String[] indices() { + return indicesLevelRequest.indices(); + } + + public IndicesOptions indicesOptions() { + return indicesLevelRequest.indicesOptions(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + indicesLevelRequest = readRequestFrom(in); + int size = in.readVInt(); + shards = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + shards.add(ShardRouting.readShardRoutingEntry(in)); + } + nodeId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + indicesLevelRequest.writeTo(out); + int size = shards.size(); + out.writeVInt(size); + for (int i = 0; i < size; i++) { + shards.get(i).writeTo(out); + } + out.writeString(nodeId); + } + } + + class NodeResponse extends TransportResponse { + protected String nodeId; + protected int totalShards; + protected List exceptions; + protected List results; + + public NodeResponse() { + } + + public NodeResponse(String nodeId, + int totalShards, + List results, + List exceptions) { + this.nodeId = nodeId; + this.totalShards = totalShards; + this.results = results; + this.exceptions = exceptions; + } + + public String getNodeId() { + return nodeId; + } + + public int getTotalShards() { + return totalShards; + } + + public int getSuccessfulShards() { + return results.size(); + } + + public List getExceptions() { + return exceptions; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + nodeId = in.readString(); + totalShards = in.readVInt(); + int resultsSize = in.readVInt(); + results = new ArrayList<>(resultsSize); + for (; resultsSize > 0; resultsSize--) { + final ShardOperationResult result = in.readBoolean() ? 
readShardResult(in) : null; + results.add(result); + } + if (in.readBoolean()) { + int failureShards = in.readVInt(); + exceptions = new ArrayList<>(failureShards); + for (int i = 0; i < failureShards; i++) { + exceptions.add(new BroadcastShardOperationFailedException(in)); + } + } else { + exceptions = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(nodeId); + out.writeVInt(totalShards); + out.writeVInt(results.size()); + for (ShardOperationResult result : results) { + out.writeOptionalStreamable(result); + } + out.writeBoolean(exceptions != null); + if (exceptions != null) { + int failureShards = exceptions.size(); + out.writeVInt(failureShards); + for (int i = 0; i < failureShards; i++) { + exceptions.get(i).writeTo(out); + } + } + } + } + + /** + * Can be used for implementations of {@link #shardOperation(BroadcastRequest, ShardRouting) shardOperation} for + * which there is no shard-level return value. 
+ */ + public final static class EmptyResult implements Streamable { + public static EmptyResult INSTANCE = new EmptyResult(); + + private EmptyResult() { + } + + @Override + public void readFrom(StreamInput in) throws IOException { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + public static EmptyResult readEmptyResultFrom(StreamInput in) { + return INSTANCE; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 48a6ed2bc5a..50c5a3f2582 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -97,7 +97,7 @@ public abstract class TransportMasterNodeAction> pluginClasses = new ArrayList<>(); private boolean loadConfigSettings = true; /** @@ -108,6 +111,14 @@ public class TransportClient extends AbstractClient { return this; } + /** + * Add the given plugin to the client when it is created. + */ + public Builder addPlugin(Class pluginClass) { + pluginClasses.add(pluginClass); + return this; + } + /** * Builds a new instance of the transport client. 
*/ @@ -122,7 +133,7 @@ public class TransportClient extends AbstractClient { .build(); Environment environment = tuple.v2(); - PluginsService pluginsService = new PluginsService(settings, tuple.v2()); + PluginsService pluginsService = new PluginsService(settings, tuple.v2(), pluginClasses); this.settings = pluginsService.updatedSettings(); Version version = Version.CURRENT; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/core/src/main/java/org/elasticsearch/cluster/ClusterInfo.java index f21bc9f052e..f10a40b82bb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterInfo.java @@ -32,28 +32,63 @@ import java.util.Map; */ public class ClusterInfo { - private final Map usages; + private final Map leastAvailableSpaceUsage; + private final Map mostAvailableSpaceUsage; final Map shardSizes; public static final ClusterInfo EMPTY = new ClusterInfo(); + private final Map routingToDataPath; - private ClusterInfo() { - this.usages = Collections.emptyMap(); - this.shardSizes = Collections.emptyMap(); + protected ClusterInfo() { + this(Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP); } - public ClusterInfo(Map usages, Map shardSizes) { - this.usages = usages; + /** + * Creates a new ClusterInfo instance. + * + * @param leastAvailableSpaceUsage a node id to disk usage mapping for the path that has the least available space on the node. + * @param mostAvailableSpaceUsage a node id to disk usage mapping for the path that has the most available space on the node. + * @param shardSizes a shardkey to size in bytes mapping per shard. 
+ * @param routingToDataPath the shard routing to datapath mapping + * @see #shardIdentifierFromRouting + */ + public ClusterInfo(final Map leastAvailableSpaceUsage, final Map mostAvailableSpaceUsage, final Map shardSizes, Map routingToDataPath) { + this.leastAvailableSpaceUsage = leastAvailableSpaceUsage; this.shardSizes = shardSizes; + this.mostAvailableSpaceUsage = mostAvailableSpaceUsage; + this.routingToDataPath = routingToDataPath; + } - public Map getNodeDiskUsages() { - return this.usages; + /** + * Returns a node id to disk usage mapping for the path that has the least available space on the node. + */ + public Map getNodeLeastAvailableDiskUsages() { + return this.leastAvailableSpaceUsage; } + /** + * Returns a node id to disk usage mapping for the path that has the most available space on the node. + */ + public Map getNodeMostAvailableDiskUsages() { + return this.mostAvailableSpaceUsage; + } + + /** + * Returns the shard size for the given shard routing or null if that metric is not available. + */ public Long getShardSize(ShardRouting shardRouting) { return shardSizes.get(shardIdentifierFromRouting(shardRouting)); } + /** + * Returns the nodes absolute data-path the given shard is allocated on or null if the information is not available. + */ + public String getDataPath(ShardRouting shardRouting) { + return routingToDataPath.get(shardRouting); + } + + /** + * Returns the shard size for the given shard routing or defaultValue if that metric is not available. + */ public long getShardSize(ShardRouting shardRouting, long defaultValue) { Long shardSize = getShardSize(shardRouting); return shardSize == null ?
defaultValue : shardSize; diff --git a/core/src/main/java/org/elasticsearch/cluster/DiskUsage.java b/core/src/main/java/org/elasticsearch/cluster/DiskUsage.java index 92725b08831..e91adae9e34 100644 --- a/core/src/main/java/org/elasticsearch/cluster/DiskUsage.java +++ b/core/src/main/java/org/elasticsearch/cluster/DiskUsage.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; public class DiskUsage { final String nodeId; final String nodeName; + final String path; final long totalBytes; final long freeBytes; @@ -35,11 +36,12 @@ public class DiskUsage { * Create a new DiskUsage, if {@code totalBytes} is 0, {@get getFreeDiskAsPercentage} * will always return 100.0% free */ - public DiskUsage(String nodeId, String nodeName, long totalBytes, long freeBytes) { + public DiskUsage(String nodeId, String nodeName, String path, long totalBytes, long freeBytes) { this.nodeId = nodeId; this.nodeName = nodeName; this.freeBytes = freeBytes; this.totalBytes = totalBytes; + this.path = path; } public String getNodeId() { @@ -50,6 +52,10 @@ public class DiskUsage { return nodeName; } + public String getPath() { + return path; + } + public double getFreeDiskAsPercentage() { // We return 100.0% in order to fail "open", in that if we have invalid // numbers for the total bytes, it's as if we don't know disk usage. 
@@ -77,7 +83,7 @@ public class DiskUsage { @Override public String toString() { - return "[" + nodeId + "][" + nodeName + "] free: " + new ByteSizeValue(getFreeBytes()) + + return "[" + nodeId + "][" + nodeName + "][" + path + "] free: " + new ByteSizeValue(getFreeBytes()) + "[" + Strings.format1Decimals(getFreeDiskAsPercentage(), "%") + "]"; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 8be9465f7b2..71ac5673911 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -32,12 +31,13 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.node.settings.NodeSettingsService; @@ -47,7 +47,6 @@ import 
org.elasticsearch.transport.ReceiveTimeoutTransportException; import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; /** * InternalClusterInfoService provides the ClusterInfoService interface, @@ -67,7 +66,9 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu private volatile TimeValue updateFrequency; - private volatile Map usages; + private volatile Map leastAvailableSpaceUsages; + private volatile Map mostAvailableSpaceUsages; + private volatile Map shardRoutingToDataPath; private volatile Map shardSizes; private volatile boolean isMaster = false; private volatile boolean enabled; @@ -84,7 +85,9 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService, ThreadPool threadPool) { super(settings); - this.usages = Collections.emptyMap(); + this.leastAvailableSpaceUsages = Collections.emptyMap(); + this.mostAvailableSpaceUsages = Collections.emptyMap(); + this.shardRoutingToDataPath = Collections.emptyMap(); this.shardSizes = Collections.emptyMap(); this.transportNodesStatsAction = transportNodesStatsAction; this.transportIndicesStatsAction = transportIndicesStatsAction; @@ -200,9 +203,16 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu if (logger.isTraceEnabled()) { logger.trace("Removing node from cluster info: {}", removedNode.getId()); } - Map newUsages = new HashMap<>(usages); - newUsages.remove(removedNode.getId()); - usages = Collections.unmodifiableMap(newUsages); + if (leastAvailableSpaceUsages.containsKey(removedNode.getId())) { + Map newMaxUsages = new HashMap<>(leastAvailableSpaceUsages); + newMaxUsages.remove(removedNode.getId()); + leastAvailableSpaceUsages = Collections.unmodifiableMap(newMaxUsages); + } + if (mostAvailableSpaceUsages.containsKey(removedNode.getId())) { + Map 
newMinUsages = new HashMap<>(mostAvailableSpaceUsages); + newMinUsages.remove(removedNode.getId()); + mostAvailableSpaceUsages = Collections.unmodifiableMap(newMinUsages); + } } } } @@ -210,7 +220,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu @Override public ClusterInfo getClusterInfo() { - return new ClusterInfo(usages, shardSizes); + return new ClusterInfo(leastAvailableSpaceUsages, mostAvailableSpaceUsages, shardSizes, shardRoutingToDataPath); } @Override @@ -313,27 +323,11 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu CountDownLatch nodeLatch = updateNodeStats(new ActionListener() { @Override public void onResponse(NodesStatsResponse nodeStatses) { - Map newUsages = new HashMap<>(); - for (NodeStats nodeStats : nodeStatses.getNodes()) { - if (nodeStats.getFs() == null) { - logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name()); - } else { - long available = 0; - long total = 0; - - for (FsInfo.Path info : nodeStats.getFs()) { - available += info.getAvailable().bytes(); - total += info.getTotal().bytes(); - } - String nodeId = nodeStats.getNode().id(); - String nodeName = nodeStats.getNode().getName(); - if (logger.isTraceEnabled()) { - logger.trace("node: [{}], total disk: {}, available disk: {}", nodeId, total, available); - } - newUsages.put(nodeId, new DiskUsage(nodeId, nodeName, total, available)); - } - } - usages = Collections.unmodifiableMap(newUsages); + Map newLeastAvaiableUsages = new HashMap<>(); + Map newMostAvaiableUsages = new HashMap<>(); + fillDiskUsagePerNode(logger, nodeStatses.getNodes(), newLeastAvaiableUsages, newMostAvaiableUsages); + leastAvailableSpaceUsages = Collections.unmodifiableMap(newLeastAvaiableUsages); + mostAvailableSpaceUsages = Collections.unmodifiableMap(newMostAvaiableUsages); } @Override @@ -349,7 +343,8 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu logger.warn("Failed to execute 
NodeStatsAction for ClusterInfoUpdateJob", e); } // we empty the usages list, to be safe - we don't know what's going on. - usages = Collections.emptyMap(); + leastAvailableSpaceUsages = Collections.emptyMap(); + mostAvailableSpaceUsages = Collections.emptyMap(); } } }); @@ -358,16 +353,11 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu @Override public void onResponse(IndicesStatsResponse indicesStatsResponse) { ShardStats[] stats = indicesStatsResponse.getShards(); - HashMap newShardSizes = new HashMap<>(); - for (ShardStats s : stats) { - long size = s.getStats().getStore().sizeInBytes(); - String sid = ClusterInfo.shardIdentifierFromRouting(s.getShardRouting()); - if (logger.isTraceEnabled()) { - logger.trace("shard: {} size: {}", sid, size); - } - newShardSizes.put(sid, size); - } + final HashMap newShardSizes = new HashMap<>(); + final HashMap newShardRoutingToDataPath = new HashMap<>(); + buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath); shardSizes = Collections.unmodifiableMap(newShardSizes); + shardRoutingToDataPath = Collections.unmodifiableMap(newShardRoutingToDataPath); } @Override @@ -384,6 +374,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu } // we empty the usages list, to be safe - we don't know what's going on. 
shardSizes = Collections.emptyMap(); + shardRoutingToDataPath = Collections.emptyMap(); } } }); @@ -412,5 +403,46 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu } } + static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, HashMap newShardSizes, HashMap newShardRoutingToDataPath) { + for (ShardStats s : stats) { + newShardRoutingToDataPath.put(s.getShardRouting(), s.getDataPath()); + long size = s.getStats().getStore().sizeInBytes(); + String sid = ClusterInfo.shardIdentifierFromRouting(s.getShardRouting()); + if (logger.isTraceEnabled()) { + logger.trace("shard: {} size: {}", sid, size); + } + newShardSizes.put(sid, size); + } + } + + static void fillDiskUsagePerNode(ESLogger logger, NodeStats[] nodeStatsArray, Map newLeastAvaiableUsages, Map newMostAvaiableUsages) { + for (NodeStats nodeStats : nodeStatsArray) { + if (nodeStats.getFs() == null) { + logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name()); + } else { + FsInfo.Path leastAvailablePath = null; + FsInfo.Path mostAvailablePath = null; + for (FsInfo.Path info : nodeStats.getFs()) { + if (leastAvailablePath == null) { + assert mostAvailablePath == null; + mostAvailablePath = leastAvailablePath = info; + } else if (leastAvailablePath.getAvailable().bytes() > info.getAvailable().bytes()){ + leastAvailablePath = info; + } else if (mostAvailablePath.getAvailable().bytes() < info.getAvailable().bytes()) { + mostAvailablePath = info; + } + } + String nodeId = nodeStats.getNode().id(); + String nodeName = nodeStats.getNode().getName(); + if (logger.isTraceEnabled()) { + logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable()); + } + newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, 
leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes())); + newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes())); + + } + } + } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 97749e63819..1e3bd9614dd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntSet; import com.google.common.base.Predicate; +import com.google.common.base.Predicates; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.UnmodifiableIterator; @@ -223,6 +224,38 @@ public class RoutingTable implements Iterable, DiffablealwaysTrue(), false); + } + + public ShardsIterator allShardsIncludingRelocationTargets(String[] indices) { + return allShardsSatisfyingPredicate(indices, Predicates.alwaysTrue(), true); + } + + // TODO: replace with JDK 8 native java.util.function.Predicate + private ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate predicate, boolean includeRelocationTargets) { + // use list here since we need to maintain identity across shards + List shards = new ArrayList<>(); + for (String index : indices) { + IndexRoutingTable indexRoutingTable = index(index); + if (indexRoutingTable == null) { + continue; + // we simply ignore indices that don't exist (makes sense for operations that use it currently) + } + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + if (predicate.apply(shardRouting)) { + shards.add(shardRouting); + if
(includeRelocationTargets && shardRouting.relocating()) { + shards.add(shardRouting.buildTargetRelocatingShard()); + } + } + } + } + } + return new PlainShardsIterator(shards); + } + /** * All the *active* primary shards for the provided indices grouped (each group is a single element, consisting * of the primary shard). This is handy for components that expect to get group iterators, but still want in some diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 2a438de800f..8b8652a067a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -164,7 +164,7 @@ public class DiskThresholdDecider extends AllocationDecider { @Override public void onNewInfo(ClusterInfo info) { - Map usages = info.getNodeDiskUsages(); + Map usages = info.getNodeLeastAvailableDiskUsages(); if (usages != null) { boolean reroute = false; String explanation = ""; @@ -313,13 +313,16 @@ public class DiskThresholdDecider extends AllocationDecider { * If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size * of all shards */ - public static long sizeOfRelocatingShards(RoutingNode node, ClusterInfo clusterInfo, boolean subtractShardsMovingAway) { + public static long sizeOfRelocatingShards(RoutingNode node, ClusterInfo clusterInfo, boolean subtractShardsMovingAway, String dataPath) { long totalSize = 0; for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) { - if (routing.initializing() && routing.relocatingNodeId() != null) { - totalSize += getShardSize(routing, clusterInfo); - } else if (subtractShardsMovingAway && routing.relocating()) { - totalSize -= getShardSize(routing, 
clusterInfo); + String actualPath = clusterInfo.getDataPath(routing); + if (dataPath.equals(actualPath)) { + if (routing.initializing() && routing.relocatingNodeId() != null) { + totalSize += getShardSize(routing, clusterInfo); + } else if (subtractShardsMovingAway && routing.relocating()) { + totalSize -= getShardSize(routing, clusterInfo); + } } } return totalSize; @@ -339,7 +342,9 @@ public class DiskThresholdDecider extends AllocationDecider { final double usedDiskThresholdLow = 100.0 - DiskThresholdDecider.this.freeDiskThresholdLow; final double usedDiskThresholdHigh = 100.0 - DiskThresholdDecider.this.freeDiskThresholdHigh; - DiskUsage usage = getDiskUsage(node, allocation); + ClusterInfo clusterInfo = allocation.clusterInfo(); + Map usages = clusterInfo.getNodeMostAvailableDiskUsages(); + DiskUsage usage = getDiskUsage(node, allocation, usages); // First, check that the node currently over the low watermark double freeDiskPercentage = usage.getFreeDiskAsPercentage(); // Cache the used disk percentage for displaying disk percentages consistent with documentation @@ -441,17 +446,26 @@ public class DiskThresholdDecider extends AllocationDecider { @Override public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (shardRouting.currentNodeId().equals(node.nodeId()) == false) { + throw new IllegalArgumentException("Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]"); + } final Decision decision = earlyTerminate(allocation); if (decision != null) { return decision; } - DiskUsage usage = getDiskUsage(node, allocation); + final ClusterInfo clusterInfo = allocation.clusterInfo(); + final Map usages = clusterInfo.getNodeLeastAvailableDiskUsages(); + final DiskUsage usage = getDiskUsage(node, allocation, usages); + final String dataPath = clusterInfo.getDataPath(shardRouting); // If this node is already above the high threshold, the shard cannot remain (get it off!) 
- double freeDiskPercentage = usage.getFreeDiskAsPercentage(); - long freeBytes = usage.getFreeBytes(); + final double freeDiskPercentage = usage.getFreeDiskAsPercentage(); + final long freeBytes = usage.getFreeBytes(); if (logger.isDebugEnabled()) { logger.debug("node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes); } + if (dataPath == null || usage.getPath().equals(dataPath) == false) { + return allocation.decision(Decision.YES, NAME, "shard is not allocated on the most utilized disk"); + } if (freeBytes < freeBytesThresholdHigh.bytes()) { if (logger.isDebugEnabled()) { logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain", @@ -472,9 +486,8 @@ public class DiskThresholdDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "enough disk for shard to remain on node, free: [%s]", new ByteSizeValue(freeBytes)); } - private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation) { + private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, Map usages) { ClusterInfo clusterInfo = allocation.clusterInfo(); - Map usages = clusterInfo.getNodeDiskUsages(); DiskUsage usage = usages.get(node.nodeId()); if (usage == null) { // If there is no usage, and we have other nodes in the cluster, @@ -487,8 +500,8 @@ public class DiskThresholdDecider extends AllocationDecider { } if (includeRelocations) { - long relocatingShardsSize = sizeOfRelocatingShards(node, clusterInfo, true); - DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().name(), + long relocatingShardsSize = sizeOfRelocatingShards(node, clusterInfo, true, usage.getPath()); + DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().name(), usage.getPath(), usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize); if (logger.isTraceEnabled()) { logger.trace("usage without relocations: {}", usage); @@ -508,7 
+521,7 @@ public class DiskThresholdDecider extends AllocationDecider { */ public DiskUsage averageUsage(RoutingNode node, Map usages) { if (usages.size() == 0) { - return new DiskUsage(node.nodeId(), node.node().name(), 0, 0); + return new DiskUsage(node.nodeId(), node.node().name(), "_na_", 0, 0); } long totalBytes = 0; long freeBytes = 0; @@ -516,7 +529,7 @@ public class DiskThresholdDecider extends AllocationDecider { totalBytes += du.getTotalBytes(); freeBytes += du.getFreeBytes(); } - return new DiskUsage(node.nodeId(), node.node().name(), totalBytes / usages.size(), freeBytes / usages.size()); + return new DiskUsage(node.nodeId(), node.node().name(), "_na_", totalBytes / usages.size(), freeBytes / usages.size()); } /** @@ -528,8 +541,8 @@ public class DiskThresholdDecider extends AllocationDecider { */ public double freeDiskPercentageAfterShardAssigned(DiskUsage usage, Long shardSize) { shardSize = (shardSize == null) ? 0 : shardSize; - DiskUsage newUsage = new DiskUsage(usage.getNodeId(), usage.getNodeName(), - usage.getTotalBytes(), usage.getFreeBytes() - shardSize); + DiskUsage newUsage = new DiskUsage(usage.getNodeId(), usage.getNodeName(), usage.getPath(), + usage.getTotalBytes(), usage.getFreeBytes() - shardSize); return newUsage.getFreeDiskAsPercentage(); } @@ -600,7 +613,7 @@ public class DiskThresholdDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "cluster info unavailable"); } - final Map usages = clusterInfo.getNodeDiskUsages(); + final Map usages = clusterInfo.getNodeLeastAvailableDiskUsages(); // Fail open if there are no disk usages available if (usages.isEmpty()) { if (logger.isTraceEnabled()) { diff --git a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java b/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java index 3646595f724..bb7a642c987 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java +++ 
b/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java @@ -46,8 +46,7 @@ public class LZFCompressor implements Compressor { public LZFCompressor() { this.decoder = ChunkDecoderFactory.safeInstance(); - Loggers.getLogger(LZFCompressor.class).debug("using encoder [{}] and decoder[{}] ", - this.decoder.getClass().getSimpleName()); + Loggers.getLogger(LZFCompressor.class).debug("using decoder[{}] ", this.decoder.getClass().getSimpleName()); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java b/core/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java index a4a154330aa..da57b1c882a 100644 --- a/core/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java +++ b/core/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java @@ -27,7 +27,6 @@ import org.elasticsearch.*; import org.elasticsearch.common.Base64; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.ByteArray; import java.io.*; import java.net.HttpURLConnection; @@ -37,9 +36,7 @@ import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.attribute.FileTime; -import java.util.Arrays; import java.util.List; -import java.util.concurrent.Callable; /** * @@ -152,12 +149,6 @@ public class HttpDownloadHelper { } catch (FileNotFoundException | NoSuchFileException e) { // checksum file doesn't exist return false; - } catch (IOException e) { - if (ExceptionsHelper.unwrapCause(e) instanceof FileNotFoundException) { - // checksum file didn't exist - return false; - } - throw e; } finally { IOUtils.deleteFilesIgnoringExceptions(checksumFile); } @@ -378,9 +369,6 @@ public class HttpDownloadHelper { responseCode == HttpURLConnection.HTTP_MOVED_TEMP || responseCode == HttpURLConnection.HTTP_SEE_OTHER) { String newLocation = 
httpConnection.getHeaderField("Location"); - String message = aSource - + (responseCode == HttpURLConnection.HTTP_MOVED_PERM ? " permanently" - : "") + " moved to " + newLocation; URL newURL = new URL(newLocation); if (!redirectionAllowed(aSource, newURL)) { return null; @@ -426,7 +414,7 @@ public class HttpDownloadHelper { } } if (is == null) { - throw new IOException("Can't get " + source + " to " + dest, lastEx); + throw lastEx; } os = Files.newOutputStream(dest); diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index f38a894cc7e..d22a7540491 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -22,6 +22,7 @@ package org.elasticsearch.index; import com.google.common.base.Function; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; + import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -56,8 +57,10 @@ import org.elasticsearch.plugins.PluginsService; import java.io.Closeable; import java.io.IOException; +import java.nio.file.Path; import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -315,8 +318,23 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone throw t; } } + if (path == null) { - path = ShardPath.selectNewPathForShard(nodeEnv, shardId, indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), this); + // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. 
for a shard + // that's being relocated/replicated we know how large it will become once it's done copying: + + // Count up how many shards are currently on each data path: + Map dataPathToShardCount = new HashMap<>(); + for(IndexShard shard : this) { + Path dataPath = shard.shardPath().getRootStatePath(); + Integer curCount = dataPathToShardCount.get(dataPath); + if (curCount == null) { + curCount = 0; + } + dataPathToShardCount.put(dataPath, curCount+1); + } + path = ShardPath.selectNewPathForShard(nodeEnv, shardId, indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), + dataPathToShardCount); logger.debug("{} creating using a new path [{}]", shardId, path); } else { logger.debug("{} creating using an existing path [{}]", shardId, path); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 403162cf7fc..2551b0774a8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -104,8 +104,9 @@ class DocumentParser implements Closeable { if (token != XContentParser.Token.START_OBJECT) { throw new MapperParsingException("Malformed content, must start with an object"); } + + boolean emptyDoc = false; if (mapping.root.isEnabled()) { - boolean emptyDoc = false; token = parser.nextToken(); if (token == XContentParser.Token.END_OBJECT) { // empty doc, we can handle it... 
@@ -113,23 +114,24 @@ class DocumentParser implements Closeable { } else if (token != XContentParser.Token.FIELD_NAME) { throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist"); } + } - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { - metadataMapper.preParse(context); - } - if (emptyDoc == false) { - Mapper update = parseObject(context, mapping.root); - if (update != null) { - context.addDynamicMappingsUpdate(update); - } - } - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { - metadataMapper.postParse(context); - } + for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { + metadataMapper.preParse(context); + } - } else { + if (mapping.root.isEnabled() == false) { // entire type is disabled parser.skipChildren(); + } else if (emptyDoc == false) { + Mapper update = parseObject(context, mapping.root); + if (update != null) { + context.addDynamicMappingsUpdate(update); + } + } + + for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { + metadataMapper.postParse(context); } // try to parse the next token, this should be null if the object is ended properly diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 8feed273c4f..b2b26c12d19 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -199,19 +199,27 @@ public final class ShardPath { } public static ShardPath selectNewPathForShard(NodeEnvironment env, ShardId shardId, @IndexSettings Settings indexSettings, - long avgShardSizeInBytes, Iterable shards) throws IOException { + long avgShardSizeInBytes, Map dataPathToShardCount) throws IOException { final Path dataPath; final Path statePath; - final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, 
IndexMetaData.INDEX_UUID_NA_VALUE); - if (NodeEnvironment.hasCustomDataPath(indexSettings)) { dataPath = env.resolveCustomLocation(indexSettings, shardId); statePath = env.nodePaths()[0].resolve(shardId); } else { - Map estReservedBytes = getEstimatedReservedBytes(env, avgShardSizeInBytes, shards); + long totFreeSpace = 0; + for (NodeEnvironment.NodePath nodePath : env.nodePaths()) { + totFreeSpace += nodePath.fileStore.getUsableSpace(); + } + + // TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know + // how large they will be once they're done copying, instead of a silly guess for such cases: + + // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average + // shard size across the cluster and 5% of the total available free space on this node: + long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0)); // TODO - do we need something more extensible? Yet, this does the job for now... 
final NodeEnvironment.NodePath[] paths = env.nodePaths(); @@ -220,10 +228,11 @@ public final class ShardPath { for (NodeEnvironment.NodePath nodePath : paths) { FileStore fileStore = nodePath.fileStore; long usableBytes = fileStore.getUsableSpace(); - Long reservedBytes = estReservedBytes.get(nodePath.path); - if (reservedBytes != null) { - // Deduct estimated reserved bytes from usable space: - usableBytes -= reservedBytes; + + // Deduct estimated reserved bytes from usable space: + Integer count = dataPathToShardCount.get(nodePath.path); + if (count != null) { + usableBytes -= estShardSizeInBytes * count; } if (usableBytes > maxUsableBytes) { maxUsableBytes = usableBytes; @@ -235,6 +244,8 @@ public final class ShardPath { dataPath = statePath; } + final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); + return new ShardPath(NodeEnvironment.hasCustomDataPath(indexSettings), dataPath, statePath, indexUUID, shardId); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 4c3b1f5e6a4..56cd225e996 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -254,7 +254,7 @@ public class IndicesService extends AbstractLifecycleComponent i if (indexShard.routingEntry() == null) { continue; } - IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard, flags) }); + IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indexShard, flags), indexShard.commitStats()) }); if (!statsByShard.containsKey(indexService.index())) { statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats)); } else { diff --git 
a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 6da2270002a..437d6b5cdb1 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -73,6 +73,7 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.percolator.PercolatorModule; import org.elasticsearch.percolator.PercolatorService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesModule; @@ -95,6 +96,8 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -117,19 +120,22 @@ public class Node implements Releasable { private final PluginsService pluginsService; private final Client client; - public Node() { - this(Settings.Builder.EMPTY_SETTINGS, true); + /** + * Constructs a node with the given settings. 
+ * + * @param preparedSettings Base settings to configure the node with + * @param loadConfigSettings true if settings should also be loaded and merged from configuration files + */ + public Node(Settings preparedSettings, boolean loadConfigSettings) { + this(preparedSettings, loadConfigSettings, Version.CURRENT, Collections.>emptyList()); } - public Node(Settings preparedSettings, boolean loadConfigSettings) { + Node(Settings preparedSettings, boolean loadConfigSettings, Version version, Collection> classpathPlugins) { final Settings pSettings = settingsBuilder().put(preparedSettings) .put(Client.CLIENT_TYPE_SETTING, CLIENT_TYPE).build(); Tuple tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings); tuple = new Tuple<>(TribeService.processSettings(tuple.v1()), tuple.v2()); - // The only place we can actually fake the version a node is running on: - Version version = pSettings.getAsVersion("tests.mock.version", Version.CURRENT); - ESLogger logger = Loggers.getLogger(Node.class, tuple.v1().get("name")); logger.info("version[{}], pid[{}], build[{}/{}]", version, JvmInfo.jvmInfo().pid(), Build.CURRENT.hashShort(), Build.CURRENT.timestamp()); @@ -141,7 +147,7 @@ public class Node implements Releasable { env.configFile(), Arrays.toString(env.dataFiles()), env.logsFile(), env.pluginsFile()); } - this.pluginsService = new PluginsService(tuple.v1(), tuple.v2()); + this.pluginsService = new PluginsService(tuple.v1(), tuple.v2(), classpathPlugins); this.settings = pluginsService.updatedSettings(); // create the environment based on the finalized (processed) view of the settings this.environment = new Environment(this.settings()); @@ -421,15 +427,4 @@ public class Node implements Releasable { public Injector injector() { return this.injector; } - - public static void main(String[] args) throws Exception { - final Node node = new Node(); - node.start(); - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - 
node.close(); - } - }); - } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java index e03b95084bf..8025be2784f 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java @@ -463,17 +463,15 @@ public class PluginManager { if (version != null) { // Elasticsearch new download service uses groupId org.elasticsearch.plugin from 2.0.0 if (user == null) { - // TODO Update to https if (!Strings.isNullOrEmpty(System.getProperty(PROPERTY_SUPPORT_STAGING_URLS))) { - addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", version, Build.CURRENT.hashShort(), name, version, name, version)); + addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", version, Build.CURRENT.hashShort(), name, version, name, version)); } - addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", name, version, name, version)); + addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", name, version, name, version)); } else { // Elasticsearch old download service - // TODO Update to https - addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/%1$s/%2$s/%2$s-%3$s.zip", user, name, version)); + addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/%1$s/%2$s/%2$s-%3$s.zip", user, name, version)); // Maven central repository - addUrl(urls, String.format(Locale.ROOT, "http://search.maven.org/remotecontent?filepath=%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version)); + addUrl(urls, String.format(Locale.ROOT, 
"https://search.maven.org/remotecontent?filepath=%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version)); // Sonatype repository addUrl(urls, String.format(Locale.ROOT, "https://oss.sonatype.org/service/local/repositories/releases/content/%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version)); // Github repository diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 8b3488ef4da..84b640e4c1b 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -70,10 +70,10 @@ public class PluginsService extends AbstractComponent { /** * We keep around a list of plugins */ - private final ImmutableList> plugins; + private final List> plugins; private final PluginsInfo info; - private final ImmutableMap> onModuleReferences; + private final Map> onModuleReferences; static class OnModuleReference { public final Class moduleClass; @@ -89,20 +89,19 @@ public class PluginsService extends AbstractComponent { * Constructs a new PluginService * @param settings The settings of the system * @param environment The environment of the system + * @param classpathPlugins Plugins that exist in the classpath which should be loaded */ - public PluginsService(Settings settings, Environment environment) { + public PluginsService(Settings settings, Environment environment, Collection> classpathPlugins) { super(settings); - ImmutableList.Builder> tupleBuilder = ImmutableList.builder(); + List> tupleBuilder = new ArrayList<>(); - // first we load specified plugins via 'plugin.types' settings parameter. - // this is a hack for what is between unit and integration tests... 
- String[] defaultPluginsClasses = settings.getAsArray("plugin.types"); - for (String pluginClass : defaultPluginsClasses) { - Plugin plugin = loadPlugin(pluginClass, settings, getClass().getClassLoader()); - PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), false, "NA", true, pluginClass, false); + // first we load plugins that are on the classpath. this is for tests and transport clients + for (Class pluginClass : classpathPlugins) { + Plugin plugin = loadPlugin(pluginClass, settings); + PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), false, "NA", true, pluginClass.getName(), false); if (logger.isTraceEnabled()) { - logger.trace("plugin loaded from settings [{}]", pluginInfo); + logger.trace("plugin loaded from classpath [{}]", pluginInfo); } tupleBuilder.add(new Tuple<>(pluginInfo, plugin)); } @@ -115,7 +114,7 @@ public class PluginsService extends AbstractComponent { throw new IllegalStateException("Unable to initialize plugins", ex); } - plugins = tupleBuilder.build(); + plugins = Collections.unmodifiableList(tupleBuilder); info = new PluginsInfo(); for (Tuple tuple : plugins) { info.add(tuple.v1()); @@ -128,7 +127,7 @@ public class PluginsService extends AbstractComponent { for (Tuple tuple : plugins) { PluginInfo info = tuple.v1(); if (info.isJvm()) { - jvmPlugins.put(tuple.v2().name(), tuple.v2()); + jvmPlugins.put(info.getName(), tuple.v2()); } if (info.isSite()) { sitePlugins.add(info.getName()); @@ -151,7 +150,7 @@ public class PluginsService extends AbstractComponent { logger.info("loaded {}, sites {}", jvmPlugins.keySet(), sitePlugins); - MapBuilder> onModuleReferences = MapBuilder.newMapBuilder(); + Map> onModuleReferences = new HashMap<>(); for (Plugin plugin : jvmPlugins.values()) { List list = new ArrayList<>(); for (Method method : plugin.getClass().getMethods()) { @@ -173,10 +172,10 @@ public class PluginsService extends AbstractComponent { onModuleReferences.put(plugin, list); } } - 
this.onModuleReferences = onModuleReferences.immutableMap(); + this.onModuleReferences = Collections.unmodifiableMap(onModuleReferences); } - public ImmutableList> plugins() { + public List> plugins() { return plugins; } @@ -355,7 +354,8 @@ public class PluginsService extends AbstractComponent { if (pluginInfo.isJvm()) { // reload lucene SPI with any new services from the plugin reloadLuceneSPI(loader); - plugin = loadPlugin(pluginInfo.getClassname(), settings, loader); + Class pluginClass = loadPluginClass(pluginInfo.getClassname(), loader); + plugin = loadPlugin(pluginClass, settings); } else { plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription()); } @@ -384,10 +384,16 @@ public class PluginsService extends AbstractComponent { TokenizerFactory.reloadTokenizers(loader); } - private Plugin loadPlugin(String className, Settings settings, ClassLoader loader) { + private Class loadPluginClass(String className, ClassLoader loader) { try { - Class pluginClass = loader.loadClass(className).asSubclass(Plugin.class); + return loader.loadClass(className).asSubclass(Plugin.class); + } catch (ClassNotFoundException e) { + throw new ElasticsearchException("Could not find plugin class [" + className + "]", e); + } + } + private Plugin loadPlugin(Class pluginClass, Settings settings) { + try { try { return pluginClass.getConstructor(Settings.class).newInstance(settings); } catch (NoSuchMethodException e) { @@ -395,13 +401,12 @@ public class PluginsService extends AbstractComponent { return pluginClass.getConstructor().newInstance(); } catch (NoSuchMethodException e1) { throw new ElasticsearchException("No constructor for [" + pluginClass + "]. 
A plugin class must " + - "have either an empty default constructor or a single argument constructor accepting a " + - "Settings instance"); + "have either an empty default constructor or a single argument constructor accepting a " + + "Settings instance"); } } - } catch (Throwable e) { - throw new ElasticsearchException("Failed to load plugin class [" + className + "]", e); + throw new ElasticsearchException("Failed to load plugin class [" + pluginClass.getName() + "]", e); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index 9d58f8f74df..4c9b3de5c79 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.rest.action.cat; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; @@ -116,19 +115,19 @@ public class RestRecoveryAction extends AbstractCatAction { Table t = getTableWithHeader(request); - for (String index : response.shardResponses().keySet()) { + for (String index : response.shardRecoveryStates().keySet()) { - List shardRecoveryResponses = response.shardResponses().get(index); - if (shardRecoveryResponses.size() == 0) { + List shardRecoveryStates = response.shardRecoveryStates().get(index); + if (shardRecoveryStates.size() == 0) { continue; } // Sort ascending by shard id for readability - CollectionUtil.introSort(shardRecoveryResponses, new Comparator() { + CollectionUtil.introSort(shardRecoveryStates, new Comparator() { 
@Override - public int compare(ShardRecoveryResponse o1, ShardRecoveryResponse o2) { - int id1 = o1.recoveryState().getShardId().id(); - int id2 = o2.recoveryState().getShardId().id(); + public int compare(RecoveryState o1, RecoveryState o2) { + int id1 = o1.getShardId().id(); + int id2 = o2.getShardId().id(); if (id1 < id2) { return -1; } else if (id1 > id2) { @@ -139,12 +138,10 @@ public class RestRecoveryAction extends AbstractCatAction { } }); - for (ShardRecoveryResponse shardResponse : shardRecoveryResponses) { - - RecoveryState state = shardResponse.recoveryState(); + for (RecoveryState state: shardRecoveryStates) { t.startRow(); t.addCell(index); - t.addCell(shardResponse.getShardId()); + t.addCell(state.getShardId().id()); t.addCell(state.getTimer().time()); t.addCell(state.getType().toString().toLowerCase(Locale.ROOT)); t.addCell(state.getStage().toString().toLowerCase(Locale.ROOT)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java index 972a1bc0110..734fb340090 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java @@ -21,7 +21,11 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.segments.*; +import org.elasticsearch.action.admin.indices.segments.IndexSegments; +import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.client.Client; import 
org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; @@ -29,7 +33,10 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Segment; -import org.elasticsearch.rest.*; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.support.RestActionListener; import org.elasticsearch.rest.action.support.RestResponseListener; import org.elasticsearch.rest.action.support.RestTable; @@ -120,8 +127,8 @@ public class RestSegmentsAction extends AbstractCatAction { for (Segment segment : segments) { table.startRow(); - table.addCell(shardSegment.getIndex()); - table.addCell(shardSegment.getShardId()); + table.addCell(shardSegment.getShardRouting().getIndex()); + table.addCell(shardSegment.getShardRouting().getId()); table.addCell(shardSegment.getShardRouting().primary() ? 
"p" : "r"); table.addCell(nodes.get(shardSegment.getShardRouting().currentNodeId()).getHostAddress()); table.addCell(shardSegment.getShardRouting().currentNodeId()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/support/RestTable.java b/core/src/main/java/org/elasticsearch/rest/action/support/RestTable.java index 3e6eb713529..e1c62049843 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/support/RestTable.java +++ b/core/src/main/java/org/elasticsearch/rest/action/support/RestTable.java @@ -68,7 +68,7 @@ public class RestTable { public static RestResponse buildTextPlainResponse(Table table, RestChannel channel) throws IOException { RestRequest request = channel.request(); - boolean verbose = request.paramAsBoolean("v", true); + boolean verbose = request.paramAsBoolean("v", false); List headers = buildDisplayHeaders(table, request); int[] width = buildWidths(table, request, verbose, headers); diff --git a/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 2b8caf8f055..2aa6818c878 100644 --- a/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport; import java.lang.reflect.Constructor; +import java.util.concurrent.Callable; /** * @@ -28,20 +29,19 @@ import java.lang.reflect.Constructor; public class RequestHandlerRegistry { private final String action; - private final Constructor requestConstructor; private final TransportRequestHandler handler; private final boolean forceExecution; private final String executor; + private final Callable requestFactory; RequestHandlerRegistry(String action, Class request, TransportRequestHandler handler, String executor, boolean forceExecution) { + this(action, new ReflectionFactory<>(request), handler, executor, forceExecution); + } + + public 
RequestHandlerRegistry(String action, Callable requestFactory, TransportRequestHandler handler, String executor, boolean forceExecution) { this.action = action; - try { - this.requestConstructor = request.getDeclaredConstructor(); - } catch (NoSuchMethodException e) { - throw new IllegalStateException("failed to create constructor (does it have a default constructor?) for request " + request, e); - } - this.requestConstructor.setAccessible(true); + this.requestFactory = requestFactory; assert newRequest() != null; this.handler = handler; this.forceExecution = forceExecution; @@ -54,7 +54,7 @@ public class RequestHandlerRegistry { public Request newRequest() { try { - return requestConstructor.newInstance(); + return requestFactory.call(); } catch (Exception e) { throw new IllegalStateException("failed to instantiate request ", e); } @@ -71,4 +71,22 @@ public class RequestHandlerRegistry { public String getExecutor() { return executor; } + + private final static class ReflectionFactory implements Callable { + private final Constructor requestConstructor; + + public ReflectionFactory(Class request) { + try { + this.requestConstructor = request.getDeclaredConstructor(); + } catch (NoSuchMethodException e) { + throw new IllegalStateException("failed to create constructor (does it have a default constructor?) 
for request " + request, e); + } + this.requestConstructor.setAccessible(true); + } + + @Override + public Request call() throws Exception { + return requestConstructor.newInstance(); + } + } } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportModule.java b/core/src/main/java/org/elasticsearch/transport/TransportModule.java index 0be84037700..0c8dbcdc118 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportModule.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportModule.java @@ -85,7 +85,7 @@ public class TransportModule extends AbstractModule { bind(TransportService.class).asEagerSingleton(); } else { if (transportServices.containsKey(typeName) == false) { - throw new IllegalArgumentException("Unknown TransportService [" + typeName + "]"); + throw new IllegalArgumentException("Unknown TransportService type [" + typeName + "], known types are: " + transportServices.keySet()); } bind(TransportService.class).to(transportServices.get(typeName)).asEagerSingleton(); } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index b70589ce52d..40fa908c2b3 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -22,8 +22,6 @@ package org.elasticsearch.transport; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; -import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -35,12 +33,21 @@ import org.elasticsearch.common.regex.Regex; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.*; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; @@ -399,6 +406,18 @@ public class TransportService extends AbstractLifecycleComponent void registerRequestHandler(String action, Callable requestFactory, String executor, TransportRequestHandler handler) { + RequestHandlerRegistry reg = new RequestHandlerRegistry<>(action, requestFactory, handler, executor, false); + registerRequestHandler(reg); + } + /** * Registers a new request handler * @param action The action the request handler is associated with @@ -408,8 +427,12 @@ public class TransportService extends AbstractLifecycleComponent void registerRequestHandler(String action, Class request, String executor, boolean forceExecution, TransportRequestHandler handler) { + RequestHandlerRegistry reg = new RequestHandlerRegistry<>(action, request, handler, executor, forceExecution); + registerRequestHandler(reg); + } + + protected void registerRequestHandler(RequestHandlerRegistry reg) { synchronized (requestHandlerMutex) { - 
RequestHandlerRegistry reg = new RequestHandlerRegistry<>(action, request, handler, executor, forceExecution); RequestHandlerRegistry replaced = requestHandlers.get(reg.getAction()); requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); if (replaced != null) { diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 10b34493334..c22638268ed 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -95,6 +95,7 @@ import org.elasticsearch.search.action.SearchServiceTransportAction; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; @@ -107,12 +108,14 @@ import org.junit.Before; import org.junit.Test; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Callable; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -141,11 +144,15 @@ public class IndicesRequestIT extends ESIntegTestCase { } @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .extendArray("plugin.types", InterceptingTransportService.TestPlugin.class.getName()) - .build(); + protected Settings nodeSettings(int ordinal) { + // must set this independently of the plugin so 
it overrides MockTransportService + return Settings.builder().put(super.nodeSettings(ordinal)) + .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build(); + } + + @Override + protected Collection> nodePlugins() { + return pluginList(InterceptingTransportService.TestPlugin.class); } @Before @@ -395,7 +402,7 @@ public class IndicesRequestIT extends ESIntegTestCase { @Test public void testOptimize() { - String optimizeShardAction = OptimizeAction.NAME + "[s]"; + String optimizeShardAction = OptimizeAction.NAME + "[n]"; interceptTransportActions(optimizeShardAction); OptimizeRequest optimizeRequest = new OptimizeRequest(randomIndicesOrAliases()); @@ -419,7 +426,7 @@ public class IndicesRequestIT extends ESIntegTestCase { @Test public void testClearCache() { - String clearCacheAction = ClearIndicesCacheAction.NAME + "[s]"; + String clearCacheAction = ClearIndicesCacheAction.NAME + "[n]"; interceptTransportActions(clearCacheAction); ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(randomIndicesOrAliases()); @@ -431,7 +438,7 @@ public class IndicesRequestIT extends ESIntegTestCase { @Test public void testRecovery() { - String recoveryAction = RecoveryAction.NAME + "[s]"; + String recoveryAction = RecoveryAction.NAME + "[n]"; interceptTransportActions(recoveryAction); RecoveryRequest recoveryRequest = new RecoveryRequest(randomIndicesOrAliases()); @@ -443,7 +450,7 @@ public class IndicesRequestIT extends ESIntegTestCase { @Test public void testSegments() { - String segmentsAction = IndicesSegmentsAction.NAME + "[s]"; + String segmentsAction = IndicesSegmentsAction.NAME + "[n]"; interceptTransportActions(segmentsAction); IndicesSegmentsRequest segmentsRequest = new IndicesSegmentsRequest(randomIndicesOrAliases()); @@ -455,7 +462,7 @@ public class IndicesRequestIT extends ESIntegTestCase { @Test public void testIndicesStats() { - String indicesStats = IndicesStatsAction.NAME + "[s]"; + String indicesStats = 
IndicesStatsAction.NAME + "[n]"; interceptTransportActions(indicesStats); IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest().indices(randomIndicesOrAliases()); @@ -856,10 +863,6 @@ public class IndicesRequestIT extends ESIntegTestCase { public void onModule(TransportModule transportModule) { transportModule.addTransportService("intercepting", InterceptingTransportService.class); } - @Override - public Settings additionalSettings() { - return Settings.builder().put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build(); - } } private final Set actions = new HashSet<>(); @@ -888,6 +891,11 @@ public class IndicesRequestIT extends ESIntegTestCase { super.registerRequestHandler(action, request, executor, forceExecution, new InterceptingRequestHandler(action, handler)); } + @Override + public void registerRequestHandler(String action, Callable requestFactory, String executor, TransportRequestHandler handler) { + super.registerRequestHandler(action, requestFactory, executor, new InterceptingRequestHandler(action, handler)); + } + private class InterceptingRequestHandler implements TransportRequestHandler { private final TransportRequestHandler requestHandler; diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java new file mode 100644 index 00000000000..d892a3d0834 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -0,0 +1,422 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.broadcast.node; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.ShardsIterator; +import 
org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.cluster.TestClusterService; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseOptions; +import org.elasticsearch.transport.TransportService; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.object.HasToString.hasToString; + +public class TransportBroadcastByNodeActionTests extends ESTestCase { + + private static final String TEST_INDEX = "test-index"; + private static final String TEST_CLUSTER = "test-cluster"; + private static ThreadPool THREAD_POOL; + + private TestClusterService clusterService; + private CapturingTransport transport; + private TransportService transportService; + + private TestTransportBroadcastByNodeAction action; + + public static class Request extends BroadcastRequest { + public Request() { + } + + public Request(String[] indices) { + super(indices); + } + } + + public static class Response extends BroadcastResponse { + public Response() { + } + + public Response(int totalShards, int successfulShards, int 
failedShards, List shardFailures) { + super(totalShards, successfulShards, failedShards, shardFailures); + } + } + + class TestTransportBroadcastByNodeAction extends TransportBroadcastByNodeAction { + private final Map shards = new HashMap<>(); + + public TestTransportBroadcastByNodeAction(Settings settings, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Class request, String executor) { + super(settings, "indices:admin/test", THREAD_POOL, TransportBroadcastByNodeActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor); + } + + @Override + protected EmptyResult readShardResult(StreamInput in) throws IOException { + return EmptyResult.readEmptyResultFrom(in); + } + + @Override + protected Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List emptyResults, List shardFailures, ClusterState clusterState) { + return new Response(totalShards, successfulShards, failedShards, shardFailures); + } + + @Override + protected Request readRequestFrom(StreamInput in) throws IOException { + final Request request = new Request(); + request.readFrom(in); + return request; + } + + @Override + protected EmptyResult shardOperation(Request request, ShardRouting shardRouting) { + if (rarely()) { + shards.put(shardRouting, Boolean.TRUE); + return EmptyResult.INSTANCE; + } else { + ElasticsearchException e = new ElasticsearchException("operation failed"); + shards.put(shardRouting, e); + throw e; + } + } + + @Override + protected ShardsIterator shards(ClusterState clusterState, Request request, String[] concreteIndices) { + return clusterState.routingTable().allShards(new String[]{TEST_INDEX}); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, Request request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected 
ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); + } + + public Map getResults() { + return shards; + } + } + + class MyResolver extends IndexNameExpressionResolver { + public MyResolver() { + super(Settings.EMPTY); + } + + @Override + public String[] concreteIndices(ClusterState state, IndicesRequest request) { + return request.indices(); + } + } + + @BeforeClass + public static void startThreadPool() { + THREAD_POOL = new ThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + transport = new CapturingTransport(); + clusterService = new TestClusterService(THREAD_POOL); + transportService = new TransportService(transport, THREAD_POOL); + transportService.start(); + setClusterState(clusterService, TEST_INDEX); + action = new TestTransportBroadcastByNodeAction( + Settings.EMPTY, + transportService, + new ActionFilters(new HashSet()), + new MyResolver(), + Request.class, + ThreadPool.Names.SAME + ); + } + + void setClusterState(TestClusterService clusterService, String index) { + int numberOfNodes = randomIntBetween(3, 5); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index); + + int shardIndex = -1; + for (int i = 0; i < numberOfNodes; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.put(node); + int numberOfShards = randomIntBetween(0, 10); + for (int j = 0; j < numberOfShards; j++) { + final ShardId shardId = new ShardId(index, ++shardIndex); + ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED, 1); + IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(shardId); + indexShard.addShard(shard); + 
indexRoutingTable.addIndexShard(indexShard.build()); + } + } + discoBuilder.localNodeId(newNode(0).id()); + discoBuilder.masterNodeId(newNode(numberOfNodes - 1).id()); + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName(TEST_CLUSTER)); + stateBuilder.nodes(discoBuilder); + stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTable.build()).build()); + ClusterState clusterState = stateBuilder.build(); + clusterService.setState(clusterState); + } + + static DiscoveryNode newNode(int nodeId) { + return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT); + } + + @AfterClass + public static void destroyThreadPool() { + ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); + // since static must set to null to be eligible for collection + THREAD_POOL = null; + } + + public void testGlobalBlock() { + Request request = new Request(new String[]{TEST_INDEX}); + PlainActionFuture listener = new PlainActionFuture<>(); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + try { + action.new AsyncAction(request, listener).start(); + fail("expected ClusterBlockException"); + } catch (ClusterBlockException expected) { + + } + } + + public void testRequestBlock() { + Request request = new Request(new String[]{TEST_INDEX}); + PlainActionFuture listener = new PlainActionFuture<>(); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addIndexBlock(TEST_INDEX, new ClusterBlock(1, "test-block", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + try { + action.new AsyncAction(request, listener).start(); + fail("expected ClusterBlockException"); + } catch (ClusterBlockException expected) { + + } + } + 
+ public void testOneRequestIsSentToEachNodeHoldingAShard() { + Request request = new Request(new String[]{TEST_INDEX}); + PlainActionFuture listener = new PlainActionFuture<>(); + + action.new AsyncAction(request, listener).start(); + Map> capturedRequests = transport.capturedRequestsByTargetNode(); + + ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[]{TEST_INDEX}); + Set set = new HashSet<>(); + for (ShardRouting shard : shardIt.asUnordered()) { + set.add(shard.currentNodeId()); + } + + // check a request was sent to the right number of nodes + assertEquals(set.size(), capturedRequests.size()); + + // check requests were sent to the right nodes + assertEquals(set, capturedRequests.keySet()); + for (Map.Entry> entry : capturedRequests.entrySet()) { + // check one request was sent to each node + assertEquals(1, entry.getValue().size()); + } + } + + public void testOperationExecution() throws Exception { + ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[]{TEST_INDEX}); + Set shards = new HashSet<>(); + String nodeId = shardIt.asUnordered().iterator().next().currentNodeId(); + for (ShardRouting shard : shardIt.asUnordered()) { + if (nodeId.equals(shard.currentNodeId())) { + shards.add(shard); + } + } + final TransportBroadcastByNodeAction.BroadcastByNodeTransportRequestHandler handler = + action.new BroadcastByNodeTransportRequestHandler(); + + TestTransportChannel channel = new TestTransportChannel(); + + handler.messageReceived(action.new NodeRequest(nodeId, new Request(), new ArrayList<>(shards)), channel); + + // check the operation was executed only on the expected shards + assertEquals(shards, action.getResults().keySet()); + + TransportResponse response = channel.getCapturedResponse(); + assertTrue(response instanceof TransportBroadcastByNodeAction.NodeResponse); + TransportBroadcastByNodeAction.NodeResponse nodeResponse = (TransportBroadcastByNodeAction.NodeResponse)response; + + // check 
the operation was executed on the correct node + assertEquals("node id", nodeId, nodeResponse.getNodeId()); + + int successfulShards = 0; + int failedShards = 0; + for (Object result : action.getResults().values()) { + if (!(result instanceof ElasticsearchException)) { + successfulShards++; + } else { + failedShards++; + } + } + + // check the operation results + assertEquals("successful shards", successfulShards, nodeResponse.getSuccessfulShards()); + assertEquals("total shards", action.getResults().size(), nodeResponse.getTotalShards()); + assertEquals("failed shards", failedShards, nodeResponse.getExceptions().size()); + List exceptions = nodeResponse.getExceptions(); + for (BroadcastShardOperationFailedException exception : exceptions) { + assertThat(exception.getMessage(), is("operation indices:admin/test failed")); + assertThat(exception, hasToString(containsString("operation failed"))); + } + } + + public void testResultAggregation() throws ExecutionException, InterruptedException { + Request request = new Request(new String[]{TEST_INDEX}); + PlainActionFuture listener = new PlainActionFuture<>(); + + action.new AsyncAction(request, listener).start(); + Map> capturedRequests = transport.capturedRequestsByTargetNode(); + transport.clear(); + + ShardsIterator shardIt = clusterService.state().getRoutingTable().allShards(new String[]{TEST_INDEX}); + Map> map = new HashMap<>(); + for (ShardRouting shard : shardIt.asUnordered()) { + if (!map.containsKey(shard.currentNodeId())) { + map.put(shard.currentNodeId(), new ArrayList()); + } + map.get(shard.currentNodeId()).add(shard); + } + + int totalShards = 0; + int totalSuccessfulShards = 0; + int totalFailedShards = 0; + for (Map.Entry> entry : capturedRequests.entrySet()) { + List exceptions = new ArrayList<>(); + long requestId = entry.getValue().get(0).requestId; + if (rarely()) { + // simulate node failure + totalShards += map.get(entry.getKey()).size(); + totalFailedShards += map.get(entry.getKey()).size(); + 
transport.handleResponse(requestId, new Exception()); + } else { + List shards = map.get(entry.getKey()); + List shardResults = new ArrayList<>(); + for (ShardRouting shard : shards) { + totalShards++; + if (rarely()) { + // simulate operation failure + totalFailedShards++; + exceptions.add(new BroadcastShardOperationFailedException(shard.shardId(), "operation indices:admin/test failed")); + } else { + shardResults.add(TransportBroadcastByNodeAction.EmptyResult.INSTANCE); + } + } + totalSuccessfulShards += shardResults.size(); + TransportBroadcastByNodeAction.NodeResponse nodeResponse = action.new NodeResponse(entry.getKey(), shards.size(), shardResults, exceptions); + transport.handleResponse(requestId, nodeResponse); + } + } + + Response response = listener.get(); + assertEquals("total shards", totalShards, response.getTotalShards()); + assertEquals("successful shards", totalSuccessfulShards, response.getSuccessfulShards()); + assertEquals("failed shards", totalFailedShards, response.getFailedShards()); + assertEquals("accumulated exceptions", totalFailedShards, response.getShardFailures().length); + } + + public class TestTransportChannel implements TransportChannel { + private TransportResponse capturedResponse; + + public TransportResponse getCapturedResponse() { + return capturedResponse; + } + + @Override + public String action() { + return null; + } + + @Override + public String getProfileName() { + return ""; + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + capturedResponse = response; + } + + @Override + public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { + } + + @Override + public void sendResponse(Throwable error) throws IOException { + } + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java 
b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java index ad2d1a79eda..e7ef3259199 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java +++ b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java @@ -19,7 +19,6 @@ package org.elasticsearch.benchmark.recovery; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse; import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,6 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.node.Node; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.transport.TransportModule; @@ -128,12 +128,12 @@ public class ReplicaRecoveryBenchmark { long currentTime = System.currentTimeMillis(); long currentDocs = indexer.totalIndexedDocs(); RecoveryResponse recoveryResponse = client1.admin().indices().prepareRecoveries(INDEX_NAME).setActiveOnly(true).get(); - List indexRecoveries = recoveryResponse.shardResponses().get(INDEX_NAME); + List indexRecoveries = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); long translogOps; long bytes; if (indexRecoveries.size() > 0) { - translogOps = indexRecoveries.get(0).recoveryState().getTranslog().recoveredOperations(); - bytes = recoveryResponse.shardResponses().get(INDEX_NAME).get(0).recoveryState().getIndex().recoveredBytes(); + translogOps = indexRecoveries.get(0).getTranslog().recoveredOperations(); + bytes = recoveryResponse.shardRecoveryStates().get(INDEX_NAME).get(0).getIndex().recoveredBytes(); } else { bytes = lastBytes = 
0; translogOps = lastTranslogOps = 0; diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java index d0f534327a1..6581cd2e98e 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java +++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java @@ -20,6 +20,7 @@ package org.elasticsearch.benchmark.scripts.expression; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.Client; @@ -28,13 +29,18 @@ import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.sort.ScriptSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.joda.time.PeriodType; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.Random; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -102,10 +108,11 @@ public class ScriptComparisonBenchmark { static Client setupIndex() throws Exception { // create cluster - Settings settings = settingsBuilder().put("plugin.types", NativeScriptPlugin.class.getName()) - .put("name", "node1") - .build(); - Node node1 = nodeBuilder().clusterName(clusterName).settings(settings).node(); + Settings settings = settingsBuilder().put("name", "node1") + .put("cluster.name", 
clusterName).build(); + Collection> plugins = Collections.>singletonList(NativeScriptPlugin.class); + Node node1 = new MockNode(settings, true, Version.CURRENT, plugins); + node1.start(); Client client = node1.client(); client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java index 23da127ba8b..7ad1837c215 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java +++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java @@ -18,14 +18,19 @@ */ package org.elasticsearch.benchmark.scripts.score; +import org.elasticsearch.Version; import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin; import org.elasticsearch.benchmark.scripts.score.script.NativeConstantForLoopScoreScript; import org.elasticsearch.benchmark.scripts.score.script.NativeConstantScoreScript; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map.Entry; @@ -46,10 +51,13 @@ public class ScriptsConstantScoreBenchmark extends BasicScriptBenchmark { init(maxTerms); List allResults = new ArrayList<>(); - Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build(); String clusterName = ScriptsConstantScoreBenchmark.class.getSimpleName(); - Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node(); + Settings settings = 
settingsBuilder().put("name", "node1") + .put("cluster.name", clusterName).build(); + Collection> plugins = Collections.>singletonList(NativeScriptExamplesPlugin.class); + Node node1 = new MockNode(settings, true, Version.CURRENT, plugins); + node1.start(); Client client = node1.client(); client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java index 889a45c4589..712b6130488 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java +++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java @@ -18,13 +18,18 @@ */ package org.elasticsearch.benchmark.scripts.score; +import org.elasticsearch.Version; import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin; import org.elasticsearch.benchmark.scripts.score.script.NativeNaiveTFIDFScoreScript; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map.Entry; @@ -46,10 +51,12 @@ public class ScriptsScoreBenchmark extends BasicScriptBenchmark { boolean runMVEL = false; init(maxTerms); List allResults = new ArrayList<>(); - Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build(); - String clusterName = ScriptsScoreBenchmark.class.getSimpleName(); - Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node(); + Settings settings = settingsBuilder().put("name", "node1") + 
.put("cluster.name", clusterName).build(); + Collection> plugins = Collections.>singletonList(NativeScriptExamplesPlugin.class); + Node node1 = new MockNode(settings, true, Version.CURRENT, plugins); + node1.start(); Client client = node1.client(); client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java index 786f943b2e5..556c224f1f3 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java +++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java @@ -18,14 +18,19 @@ */ package org.elasticsearch.benchmark.scripts.score; +import org.elasticsearch.Version; import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin; import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumNoRecordScoreScript; import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumScoreScript; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map.Entry; @@ -46,10 +51,12 @@ public class ScriptsScorePayloadSumBenchmark extends BasicScriptBenchmark { init(maxTerms); List allResults = new ArrayList<>(); - Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build(); - String clusterName = ScriptsScoreBenchmark.class.getSimpleName(); - Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node(); + 
Settings settings = settingsBuilder().put("name", "node1") + .put("cluster.name", clusterName).build(); + Collection> plugins = Collections.>singletonList(NativeScriptExamplesPlugin.class); + Node node1 = new MockNode(settings, true, Version.CURRENT, plugins); + node1.start(); Client client = node1.client(); client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index 20d14f33dd4..631b2d18839 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import org.junit.Test; +import java.util.Collection; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -63,12 +64,13 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTests { @Override protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) { - TransportClient client = TransportClient.builder().settings(Settings.builder() + TransportClient client = TransportClient.builder() + .settings(Settings.builder() .put("client.transport.sniff", false) .put("node.name", "transport_client_" + this.getTestName()) - .put("plugin.types", InternalTransportService.TestPlugin.class.getName()) .put(headersSettings) - .build()).build(); + .build()) + .addPlugin(InternalTransportService.TestPlugin.class).build(); client.addTransportAddress(address); return client; @@ -76,15 +78,17 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTests { @Test public void testWithSniffing() throws Exception { - TransportClient client = 
TransportClient.builder().settings(Settings.builder() + TransportClient client = TransportClient.builder() + .settings(Settings.builder() .put("client.transport.sniff", true) .put("cluster.name", "cluster1") .put("node.name", "transport_client_" + this.getTestName() + "_1") - .put("client.transport.nodes_sampler_interval", "1s") - .put("plugin.types", InternalTransportService.TestPlugin.class.getName()) + .put("client.transport.nodes_sampler_interval", "1s") .put(HEADER_SETTINGS) - .put("path.home", createTempDir().toString()) - .build()).build(); + .put("path.home", createTempDir().toString()).build()) + .addPlugin(InternalTransportService.TestPlugin.class) + .build(); + try { client.addTransportAddress(address); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 6368e5aa1d9..0abc7c48955 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -31,10 +31,16 @@ import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -47,6 +53,7 @@ 
import org.hamcrest.Matchers; import org.junit.Test; import java.io.IOException; +import java.util.Collection; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -146,16 +153,23 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { return Settings.builder() // manual collection or upon cluster forming. .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, "1s") - .putArray("plugin.types", TestPlugin.class.getName(), MockTransportService.TestPlugin.class.getName()) .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(TestPlugin.class, + MockTransportService.TestPlugin.class); + } + @Test public void testClusterInfoServiceCollectsInformation() throws Exception { internalCluster().startNodesAsync(2, Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "200ms").build()) .get(); - assertAcked(prepareCreate("test").setSettings(settingsBuilder().put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0).build())); + assertAcked(prepareCreate("test").setSettings(settingsBuilder() + .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE).build())); ensureGreen("test"); InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the master node @@ -164,13 +178,18 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { infoService.addListener(listener); ClusterInfo info = listener.get(); assertNotNull("info should not be null", info); - Map usages = info.getNodeDiskUsages(); - Map shardSizes = info.shardSizes; - assertNotNull(usages); + final Map leastUsages = info.getNodeLeastAvailableDiskUsages(); + final Map mostUsages = info.getNodeMostAvailableDiskUsages(); + final Map shardSizes = info.shardSizes; + assertNotNull(leastUsages); assertNotNull(shardSizes); - assertThat("some usages are populated", usages.values().size(), 
Matchers.equalTo(2)); + assertThat("some usages are populated", leastUsages.values().size(), Matchers.equalTo(2)); assertThat("some shard sizes are populated", shardSizes.values().size(), greaterThan(0)); - for (DiskUsage usage : usages.values()) { + for (DiskUsage usage : leastUsages.values()) { + logger.info("--> usage: {}", usage); + assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThan(0L)); + } + for (DiskUsage usage : mostUsages.values()) { logger.info("--> usage: {}", usage); assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThan(0L)); } @@ -178,6 +197,21 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { logger.info("--> shard size: {}", size); assertThat("shard size is greater than 0", size, greaterThan(0L)); } + ClusterService clusterService = internalTestCluster.getInstance(ClusterService.class, internalTestCluster.getMasterName()); + ClusterState state = clusterService.state(); + RoutingNodes routingNodes = state.getRoutingNodes(); + for (ShardRouting shard : routingNodes.getRoutingTable().allShards()) { + String dataPath = info.getDataPath(shard); + assertNotNull(dataPath); + + String nodeId = shard.currentNodeId(); + DiscoveryNode discoveryNode = state.getNodes().get(nodeId); + IndicesService indicesService = internalTestCluster.getInstance(IndicesService.class, discoveryNode.getName()); + IndexService indexService = indicesService.indexService(shard.index()); + IndexShard indexShard = indexService.shard(shard.id()); + assertEquals(indexShard.shardPath().getRootDataPath().toString(), dataPath); + } + } @Test @@ -197,14 +231,14 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { infoService.updateOnce(); ClusterInfo info = listener.get(); assertNotNull("failed to collect info", info); - assertThat("some usages are populated", info.getNodeDiskUsages().size(), Matchers.equalTo(2)); + assertThat("some usages are populated", info.getNodeLeastAvailableDiskUsages().size(), Matchers.equalTo(2)); 
assertThat("some shard sizes are populated", info.shardSizes.size(), greaterThan(0)); MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, internalTestCluster.getMasterName()); final AtomicBoolean timeout = new AtomicBoolean(false); - final Set blockedActions = ImmutableSet.of(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[s]"); + final Set blockedActions = ImmutableSet.of(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[n]"); // drop all outgoing stats requests to force a timeout. for (DiscoveryNode node : internalTestCluster.clusterService().state().getNodes()) { mockTransportService.addDelegate(node, new MockTransportService.DelegateTransport(mockTransportService.original()) { @@ -231,7 +265,8 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { // node info will time out both on the request level on the count down latch. this means // it is likely to update the node disk usage based on the one response that came be from local // node. - assertThat(info.getNodeDiskUsages().size(), greaterThanOrEqualTo(1)); + assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThanOrEqualTo(1)); + assertThat(info.getNodeMostAvailableDiskUsages().size(), greaterThanOrEqualTo(1)); // indices is guaranteed to time out on the latch, not updating anything. 
assertThat(info.shardSizes.size(), greaterThan(1)); @@ -252,7 +287,8 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { infoService.updateOnce(); info = listener.get(); assertNotNull("info should not be null", info); - assertThat(info.getNodeDiskUsages().size(), equalTo(0)); + assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(0)); + assertThat(info.getNodeMostAvailableDiskUsages().size(), equalTo(0)); assertThat(info.shardSizes.size(), equalTo(0)); // check we recover @@ -261,7 +297,8 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { infoService.updateOnce(); info = listener.get(); assertNotNull("info should not be null", info); - assertThat(info.getNodeDiskUsages().size(), equalTo(2)); + assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(2)); + assertThat(info.getNodeMostAvailableDiskUsages().size(), equalTo(2)); assertThat(info.shardSizes.size(), greaterThan(0)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 9776f667dc9..5c3d0d966c2 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -69,6 +69,11 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.SuppressLocalMode public class ClusterServiceIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return pluginList(TestPlugin.class); + } + @Test public void testTimeoutUpdateTask() throws Exception { Settings settings = settingsBuilder() @@ -637,7 +642,6 @@ public class ClusterServiceIT extends ESIntegTestCase { .put("discovery.zen.minimum_master_nodes", 1) .put("discovery.zen.ping_timeout", "400ms") .put("discovery.initial_state_timeout", "500ms") - .put("plugin.types", TestPlugin.class.getName()) .build(); String node_0 = internalCluster().startNode(settings); diff --git 
a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index df9c1883dd8..a4278295369 100644 --- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -19,16 +19,33 @@ package org.elasticsearch.cluster; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.StoreStats; +import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.test.ESTestCase; import org.junit.Test; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + import static org.hamcrest.Matchers.equalTo; public class DiskUsageTests extends ESTestCase { @Test public void diskUsageCalcTest() { - DiskUsage du = new DiskUsage("node1", "n1", 100, 40); + DiskUsage du = new DiskUsage("node1", "n1", "random", 100, 40); assertThat(du.getFreeDiskAsPercentage(), equalTo(40.0)); assertThat(du.getUsedDiskAsPercentage(), equalTo(100.0 - 40.0)); assertThat(du.getFreeBytes(), equalTo(40L)); @@ -37,19 +54,19 @@ public class DiskUsageTests extends ESTestCase { // Test that DiskUsage handles invalid numbers, as reported by some // filesystems (ZFS & NTFS) - DiskUsage du2 = new DiskUsage("node1", "n1", 100, 101); + DiskUsage du2 = new DiskUsage("node1", "n1","random", 100, 101); 
assertThat(du2.getFreeDiskAsPercentage(), equalTo(101.0)); assertThat(du2.getFreeBytes(), equalTo(101L)); assertThat(du2.getUsedBytes(), equalTo(-1L)); assertThat(du2.getTotalBytes(), equalTo(100L)); - DiskUsage du3 = new DiskUsage("node1", "n1", -1, -1); + DiskUsage du3 = new DiskUsage("node1", "n1", "random",-1, -1); assertThat(du3.getFreeDiskAsPercentage(), equalTo(100.0)); assertThat(du3.getFreeBytes(), equalTo(-1L)); assertThat(du3.getUsedBytes(), equalTo(0L)); assertThat(du3.getTotalBytes(), equalTo(-1L)); - DiskUsage du4 = new DiskUsage("node1", "n1", 0, 0); + DiskUsage du4 = new DiskUsage("node1", "n1","random", 0, 0); assertThat(du4.getFreeDiskAsPercentage(), equalTo(100.0)); assertThat(du4.getFreeBytes(), equalTo(0L)); assertThat(du4.getUsedBytes(), equalTo(0L)); @@ -62,7 +79,7 @@ public class DiskUsageTests extends ESTestCase { for (int i = 1; i < iters; i++) { long total = between(Integer.MIN_VALUE, Integer.MAX_VALUE); long free = between(Integer.MIN_VALUE, Integer.MAX_VALUE); - DiskUsage du = new DiskUsage("random", "random", total, free); + DiskUsage du = new DiskUsage("random", "random", "random", total, free); if (total == 0) { assertThat(du.getFreeBytes(), equalTo(free)); assertThat(du.getTotalBytes(), equalTo(0L)); @@ -78,4 +95,85 @@ public class DiskUsageTests extends ESTestCase { } } } + + public void testFillShardLevelInfo() { + ShardRouting test_0 = ShardRouting.newUnassigned("test", 0, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRoutingHelper.initialize(test_0, "node1"); + ShardRoutingHelper.moveToStarted(test_0); + Path test0Path = createTempDir().resolve("indices").resolve("test").resolve("0"); + CommonStats commonStats0 = new CommonStats(); + commonStats0.store = new StoreStats(100, 1); + ShardRouting test_1 = ShardRouting.newUnassigned("test", 1, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRoutingHelper.initialize(test_1, "node2"); + 
ShardRoutingHelper.moveToStarted(test_1); + Path test1Path = createTempDir().resolve("indices").resolve("test").resolve("1"); + CommonStats commonStats1 = new CommonStats(); + commonStats1.store = new StoreStats(1000, 1); + ShardStats[] stats = new ShardStats[] { + new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, "0xdeadbeef", test_0.shardId()), commonStats0 , null), + new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, "0xdeadbeef", test_1.shardId()), commonStats1 , null) + }; + HashMap shardSizes = new HashMap<>(); + HashMap routingToPath = new HashMap<>(); + InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath); + assertEquals(2, shardSizes.size()); + assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_0))); + assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_1))); + assertEquals(100l, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_0)).longValue()); + assertEquals(1000l, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_1)).longValue()); + + assertEquals(2, routingToPath.size()); + assertTrue(routingToPath.containsKey(test_0)); + assertTrue(routingToPath.containsKey(test_1)); + assertEquals(test0Path.getParent().getParent().getParent().toAbsolutePath().toString(), routingToPath.get(test_0)); + assertEquals(test1Path.getParent().getParent().getParent().toAbsolutePath().toString(), routingToPath.get(test_1)); + } + + public void testFillDiskUsage() { + Map newLeastAvaiableUsages = new HashMap<>(); + Map newMostAvaiableUsages = new HashMap<>(); + FsInfo.Path[] node1FSInfo = new FsInfo.Path[] { + new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), + new FsInfo.Path("/least", "/dev/sdb", 200, 190, 70), + new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280), + }; + FsInfo.Path[] node2FSInfo = new FsInfo.Path[] { + new FsInfo.Path("/least_most", "/dev/sda", 100, 90, 80), + }; + + FsInfo.Path[] node3FSInfo = new FsInfo.Path[] { + 
new FsInfo.Path("/least", "/dev/sda", 100, 90, 70), + new FsInfo.Path("/most", "/dev/sda", 100, 90, 80), + }; + NodeStats[] nodeStats = new NodeStats[] { + new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, + null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null), + new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, + null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null), + new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, + null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null) + }; + InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages); + DiskUsage leastNode_1 = newLeastAvaiableUsages.get("node_1"); + DiskUsage mostNode_1 = newMostAvaiableUsages.get("node_1"); + assertDiskUsage(mostNode_1, node1FSInfo[2]); + assertDiskUsage(leastNode_1, node1FSInfo[1]); + + DiskUsage leastNode_2 = newLeastAvaiableUsages.get("node_2"); + DiskUsage mostNode_2 = newMostAvaiableUsages.get("node_2"); + assertDiskUsage(leastNode_2, node2FSInfo[0]); + assertDiskUsage(mostNode_2, node2FSInfo[0]); + + DiskUsage leastNode_3 = newLeastAvaiableUsages.get("node_3"); + DiskUsage mostNode_3 = newMostAvaiableUsages.get("node_3"); + assertDiskUsage(leastNode_3, node3FSInfo[0]); + assertDiskUsage(mostNode_3, node3FSInfo[1]); + } + + private void assertDiskUsage(DiskUsage usage, FsInfo.Path path) { + assertEquals(usage.toString(), usage.getPath(), path.getPath()); + assertEquals(usage.toString(), usage.getTotalBytes(), path.getTotal().bytes()); + assertEquals(usage.toString(), usage.getFreeBytes(), path.getAvailable().bytes()); + + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/core/src/test/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 33ae26e6ebe..f3cfe2ef24a 100644 
--- a/core/src/test/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/core/src/test/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; +import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDeciderTests; import org.elasticsearch.cluster.routing.allocation.decider.MockDiskUsagesIT; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -63,9 +64,9 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { ClusterService clusterService, ThreadPool threadPool) { super(settings, nodeSettingsService, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool); this.clusterName = ClusterName.clusterNameFromSettings(settings); - stats[0] = MockDiskUsagesIT.makeStats("node_t1", new DiskUsage("node_t1", "n1", 100, 100)); - stats[1] = MockDiskUsagesIT.makeStats("node_t2", new DiskUsage("node_t2", "n2", 100, 100)); - stats[2] = MockDiskUsagesIT.makeStats("node_t3", new DiskUsage("node_t3", "n3", 100, 100)); + stats[0] = MockDiskUsagesIT.makeStats("node_t1", new DiskUsage("node_t1", "n1", "/dev/null", 100, 100)); + stats[1] = MockDiskUsagesIT.makeStats("node_t2", new DiskUsage("node_t2", "n2", "/dev/null", 100, 100)); + stats[2] = MockDiskUsagesIT.makeStats("node_t3", new DiskUsage("node_t3", "n3", "/dev/null", 100, 100)); } public void setN1Usage(String nodeName, DiskUsage newUsage) { @@ -92,4 +93,9 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { // Not used, so noop return new CountDownLatch(0); } + + public ClusterInfo getClusterInfo() { + ClusterInfo 
clusterInfo = super.getClusterInfo(); + return new ClusterInfo(clusterInfo.getNodeLeastAvailableDiskUsages(), clusterInfo.getNodeMostAvailableDiskUsages(), clusterInfo.shardSizes, DiskThresholdDeciderTests.DEV_NULL_MAP); + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java index 2cbc364bafe..9a0cbb2bcca 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java @@ -243,4 +243,25 @@ public class RoutingTableTest extends ESAllocationTestCase { fail("Calling with non-existing index should be ignored at the moment"); } } + + public void testAllShardsForMultipleIndices() { + assertThat(this.emptyRoutingTable.allShards(new String[0]).size(), is(0)); + + assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex)); + + initPrimaries(); + assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex)); + + startInitializingShards(TEST_INDEX_1); + assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex)); + + startInitializingShards(TEST_INDEX_2); + assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1, TEST_INDEX_2}).size(), is(this.totalNumberOfShards)); + + try { + this.testRoutingTable.allShards(new String[]{TEST_INDEX_1, "not_exists"}); + } catch (IndexNotFoundException e) { + fail("Calling with non-existing index should be ignored at the moment"); + } + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index e512fcdfbd3..e6a0ec4bc97 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -59,7 +59,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(Settings.EMPTY, new ClusterInfoService() { @Override public ClusterInfo getClusterInfo() { - return new ClusterInfo(Collections.EMPTY_MAP, Collections.EMPTY_MAP) { + return new ClusterInfo() { @Override public Long getShardSize(ShardRouting shardRouting) { if (shardRouting.index().equals("test") && shardRouting.shardId().getId() == 0) { @@ -118,7 +118,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { final AllocationService allocation = createAllocationService(Settings.EMPTY, new ClusterInfoService() { @Override public ClusterInfo getClusterInfo() { - return new ClusterInfo(Collections.EMPTY_MAP, Collections.EMPTY_MAP) { + return new ClusterInfo() { @Override public Long getShardSize(ShardRouting shardRouting) { if (shardRouting.index().equals("test") && shardRouting.shardId().getId() == 0) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index 56510384246..4dd88501ec2 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -66,7 +66,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase { new ClusterInfoService() { @Override public ClusterInfo getClusterInfo() { - return new ClusterInfo(Collections.EMPTY_MAP, Collections.EMPTY_MAP) { + return new ClusterInfo() { @Override public Long getShardSize(ShardRouting shardRouting) { if 
(shardRouting.index().equals("test")) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 41e995c367d..d73d07a070e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -65,15 +65,15 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); Map usages = new HashMap<>(); - usages.put("node1", new DiskUsage("node1", "node1", 100, 10)); // 90% used - usages.put("node2", new DiskUsage("node2", "node2", 100, 35)); // 65% used - usages.put("node3", new DiskUsage("node3", "node3", 100, 60)); // 40% used - usages.put("node4", new DiskUsage("node4", "node4", 100, 80)); // 20% used + usages.put("node1", new DiskUsage("node1", "node1", "/dev/null", 100, 10)); // 90% used + usages.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 35)); // 65% used + usages.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 60)); // 40% used + usages.put("node4", new DiskUsage("node4", "node4", "/dev/null", 100, 80)); // 20% used Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 10L); // 10 bytes shardSizes.put("[test][0][r]", 10L); - final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes)); + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes), DEV_NULL_MAP); AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -92,7 +92,6 @@ public class DiskThresholdDeciderTests extends 
ESAllocationTestCase { // noop } }; - AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") @@ -259,16 +258,16 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "9b").build(); Map usages = new HashMap<>(); - usages.put("node1", new DiskUsage("node1", "n1", 100, 10)); // 90% used - usages.put("node2", new DiskUsage("node2", "n2", 100, 10)); // 90% used - usages.put("node3", new DiskUsage("node3", "n3", 100, 60)); // 40% used - usages.put("node4", new DiskUsage("node4", "n4", 100, 80)); // 20% used - usages.put("node5", new DiskUsage("node5", "n5", 100, 85)); // 15% used + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 10)); // 90% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 10)); // 90% used + usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 60)); // 40% used + usages.put("node4", new DiskUsage("node4", "n4", "/dev/null", 100, 80)); // 20% used + usages.put("node5", new DiskUsage("node5", "n5", "/dev/null", 100, 85)); // 15% used Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 10L); // 10 bytes shardSizes.put("[test][0][r]", 10L); - final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes)); + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes), DEV_NULL_MAP); AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -329,8 +328,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> nodeWithoutPrimary: {}", nodeWithoutPrimary); // Make node without the primary now 
habitable to replicas - usages.put(nodeWithoutPrimary, new DiskUsage(nodeWithoutPrimary, "", 100, 35)); // 65% used - final ClusterInfo clusterInfo2 = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes)); + usages.put(nodeWithoutPrimary, new DiskUsage(nodeWithoutPrimary, "", "/dev/null", 100, 35)); // 65% used + final ClusterInfo clusterInfo2 = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes), DEV_NULL_MAP); cis = new ClusterInfoService() { @Override public ClusterInfo getClusterInfo() { @@ -524,12 +523,12 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "71%").build(); Map usages = new HashMap<>(); - usages.put("node1", new DiskUsage("node1", "n1", 100, 31)); // 69% used - usages.put("node2", new DiskUsage("node2", "n2", 100, 1)); // 99% used + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 31)); // 69% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 1)); // 99% used Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 10L); // 10 bytes - final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes)); + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes), DEV_NULL_MAP); AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -590,13 +589,13 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build(); Map usages = new HashMap<>(); - usages.put("node2", new DiskUsage("node2", "node2", 100, 50)); // 50% used - usages.put("node3", new DiskUsage("node3", "node3", 100, 0)); // 100% 
used + usages.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 50)); // 50% used + usages.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 0)); // 100% used Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 10L); // 10 bytes shardSizes.put("[test][0][r]", 10L); // 10 bytes - final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes)); + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes), DEV_NULL_MAP); AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -661,8 +660,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY); Map usages = new HashMap<>(); - usages.put("node2", new DiskUsage("node2", "n2", 100, 50)); // 50% used - usages.put("node3", new DiskUsage("node3", "n3", 100, 0)); // 100% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 50)); // 50% used + usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0)); // 100% used DiskUsage node1Usage = decider.averageUsage(rn, usages); assertThat(node1Usage.getTotalBytes(), equalTo(100L)); @@ -675,10 +674,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY); Map usages = new HashMap<>(); - usages.put("node2", new DiskUsage("node2", "n2", 100, 50)); // 50% used - usages.put("node3", new DiskUsage("node3", "n3", 100, 0)); // 100% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 50)); // 50% used + usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0)); // 100% used - Double after = decider.freeDiskPercentageAfterShardAssigned(new DiskUsage("node2", "n2", 100, 30), 11L); + Double after = 
decider.freeDiskPercentageAfterShardAssigned(new DiskUsage("node2", "n2", "/dev/null", 100, 30), 11L); assertThat(after, equalTo(19.0)); } @@ -691,16 +690,16 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); Map usages = new HashMap<>(); - usages.put("node1", new DiskUsage("node1", "n1", 100, 40)); // 60% used - usages.put("node2", new DiskUsage("node2", "n2", 100, 40)); // 60% used - usages.put("node2", new DiskUsage("node3", "n3", 100, 40)); // 60% used + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40)); // 60% used + usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 40)); // 60% used Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 14L); // 14 bytes shardSizes.put("[test][0][r]", 14L); shardSizes.put("[test2][0][p]", 1L); // 1 bytes shardSizes.put("[test2][0][r]", 1L); - final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes)); + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes), DEV_NULL_MAP); AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -797,13 +796,13 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // We have an index with 2 primary shards each taking 40 bytes. 
Each node has 100 bytes available Map usages = new HashMap<>(); - usages.put("node1", new DiskUsage("node1", "n1", 100, 20)); // 80% used - usages.put("node2", new DiskUsage("node2", "n2", 100, 100)); // 0% used + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 20)); // 80% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 100)); // 0% used Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 40L); shardSizes.put("[test][1][p]", 40L); - final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes)); + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes), DEV_NULL_MAP); DiskThresholdDecider diskThresholdDecider = new DiskThresholdDecider(diskSettings); MetaData metaData = MetaData.builder() @@ -916,4 +915,26 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { rn.shardsWithState(RELOCATING), rn.shardsWithState(STARTED)); } + + public static final Map DEV_NULL_MAP = Collections.unmodifiableMap(new StaticValueMap("/dev/null")); + + // a test only map that always returns the same value no matter what key is passed + private static final class StaticValueMap extends AbstractMap { + + private final String value; + + private StaticValueMap(String value) { + this.value = value; + } + + @Override + public String get(Object key) { + return value; + } + + @Override + public Set> entrySet() { + throw new UnsupportedOperationException("this is a test-only map that only supports #get(Object key)"); + } + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 0be13948e42..853f669c6d5 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -20,15 +20,15 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterInfoService; -import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingHelper; -import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.node.settings.NodeSettingsService; @@ -76,11 +76,11 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { applySettings.onRefreshSettings(newSettings); assertThat("high threshold bytes should be unset", - decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test"))); + decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test"))); assertThat("high threshold percentage should be changed", decider.getFreeDiskThresholdHigh(), equalTo(30.0d)); assertThat("low threshold bytes should be set to 500mb", - decider.getFreeBytesThresholdLow(), 
equalTo(ByteSizeValue.parseBytesSizeValue("500mb", "test"))); + decider.getFreeBytesThresholdLow(), equalTo(ByteSizeValue.parseBytesSizeValue("500mb", "test"))); assertThat("low threshold bytes should be unset", decider.getFreeDiskThresholdLow(), equalTo(0.0d)); assertThat("reroute interval should be changed to 30 seconds", @@ -89,13 +89,133 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { assertFalse("relocations should now be disabled", decider.isIncludeRelocations()); } + public void testCanAllocateUsesMaxAvailableSpace() { + NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; + DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); + + ShardRouting test_0 = ShardRouting.newUnassigned("test", 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + DiscoveryNode node_0 = new DiscoveryNode("node_0", DummyTransportAddress.INSTANCE, Version.CURRENT); + DiscoveryNode node_1 = new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT); + + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); + + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .put(node_0) + .put(node_1) + ).build(); + + // actual test -- after all that bloat :) + Map leastAvailableUsages = new HashMap<>(); + leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, 0)); // all full + leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, 0)); // all full + + Map mostAvailableUsage = new 
HashMap<>(); + mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, randomIntBetween(20, 100))); // 20 - 99 percent since after allocation there must be at least 10% left and shard is 10byte + mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, randomIntBetween(0, 10))); // this is weird and smells like a bug! it should be up to 20%? + + Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 10L); // 10 bytes + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(leastAvailableUsages), Collections.unmodifiableMap(mostAvailableUsage), Collections.unmodifiableMap(shardSizes), Collections.EMPTY_MAP); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState.nodes(), clusterInfo); + assertEquals(mostAvailableUsage.toString(), Decision.YES, decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation)); + assertEquals(mostAvailableUsage.toString(), Decision.NO, decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation)); + } + + public void testCanRemainUsesLeastAvailableSpace() { + NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; + DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); + Map shardRoutingMap = new HashMap<>(); + + DiscoveryNode node_0 = new DiscoveryNode("node_0", DummyTransportAddress.INSTANCE, Version.CURRENT); + DiscoveryNode node_1 = new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT); + + ShardRouting test_0 = ShardRouting.newUnassigned("test", 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRoutingHelper.initialize(test_0, node_0.getId()); + ShardRoutingHelper.moveToStarted(test_0); + shardRoutingMap.put(test_0, "/node0/least"); + + 
ShardRouting test_1 = ShardRouting.newUnassigned("test", 1, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRoutingHelper.initialize(test_1, node_1.getId()); + ShardRoutingHelper.moveToStarted(test_1); + shardRoutingMap.put(test_1, "/node1/least"); + + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); + + logger.info("--> adding two nodes"); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .put(node_0) + .put(node_1) + ).build(); + + // actual test -- after all that bloat :) + Map leastAvailableUsages = new HashMap<>(); + leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "/node0/least", 100, 10)); // 90% used + leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "/node1/least", 100, 9)); // 91% used + + Map mostAvailableUsage = new HashMap<>(); + mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "/node0/most", 100, 90)); // 10% used + mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "/node1/most", 100, 90)); // 10% used + + Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 10L); // 10 bytes + shardSizes.put("[test][1][p]", 10L); + shardSizes.put("[test][2][p]", 10L); + + final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(leastAvailableUsages), Collections.unmodifiableMap(mostAvailableUsage), Collections.unmodifiableMap(shardSizes), shardRoutingMap); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), 
clusterState.nodes(), clusterInfo); + assertEquals(Decision.YES, decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation)); + assertEquals(Decision.NO, decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation)); + try { + decider.canRemain(test_0, new RoutingNode("node_1", node_1), allocation); + fail("not allocated on this node"); + } catch (IllegalArgumentException ex) { + // not allocated on that node + } + try { + decider.canRemain(test_1, new RoutingNode("node_0", node_0), allocation); + fail("not allocated on this node"); + } catch (IllegalArgumentException ex) { + // not allocated on that node + } + + ShardRouting test_2 = ShardRouting.newUnassigned("test", 2, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRoutingHelper.initialize(test_2, node_1.getId()); + ShardRoutingHelper.moveToStarted(test_2); + shardRoutingMap.put(test_2, "/node1/most"); + assertEquals("can stay since allocated on a different path with enough space", Decision.YES, decider.canRemain(test_2, new RoutingNode("node_1", node_1), allocation)); + + ShardRouting test_3 = ShardRouting.newUnassigned("test", 3, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRoutingHelper.initialize(test_3, node_1.getId()); + ShardRoutingHelper.moveToStarted(test_3); + assertEquals("can stay since we don't have information about this shard", Decision.YES, decider.canRemain(test_3, new RoutingNode("node_1", node_1), allocation)); + } + + + public void testShardSizeAndRelocatingSize() { Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][r]", 10L); shardSizes.put("[test][1][r]", 100L); shardSizes.put("[test][2][r]", 1000L); shardSizes.put("[other][0][p]", 10000L); - ClusterInfo info = new ClusterInfo(Collections.EMPTY_MAP, shardSizes); + ClusterInfo info = new ClusterInfo(Collections.EMPTY_MAP, Collections.EMPTY_MAP, shardSizes, DiskThresholdDeciderTests.DEV_NULL_MAP); ShardRouting test_0 =
ShardRouting.newUnassigned("test", 0, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); ShardRoutingHelper.initialize(test_0, "node1"); ShardRoutingHelper.moveToStarted(test_0); @@ -115,8 +235,10 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { assertEquals(10l, DiskThresholdDecider.getShardSize(test_0, info)); RoutingNode node = new RoutingNode("node1", new DiscoveryNode("node1", LocalTransportAddress.PROTO, Version.CURRENT), Arrays.asList(test_0, test_1.buildTargetRelocatingShard(), test_2)); - assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false)); - assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true)); + assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null")); + assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null")); + assertEquals(0l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/some/other/dev")); + assertEquals(0l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/some/other/dev")); ShardRouting test_3 = ShardRouting.newUnassigned("test", 3, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); ShardRoutingHelper.initialize(test_3, "node1"); @@ -132,11 +254,11 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { node = new RoutingNode("node1", new DiscoveryNode("node1", LocalTransportAddress.PROTO, Version.CURRENT), Arrays.asList(test_0, test_1.buildTargetRelocatingShard(), test_2, other_0.buildTargetRelocatingShard())); if (other_0.primary()) { - assertEquals(10100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false)); - assertEquals(10090l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true)); + assertEquals(10100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null")); + assertEquals(10090l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, 
"/dev/null")); } else { - assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false)); - assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true)); + assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null")); + assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null")); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index e5a14d29a73..7be0dc22115 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -33,9 +33,11 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.util.Collection; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -52,13 +54,17 @@ public class MockDiskUsagesIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - // Use the mock internal cluster info service, which has fake-able disk usages - .extendArray("plugin.types", MockInternalClusterInfoService.TestPlugin.class.getName()) // Update more frequently .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "1s") .build(); } + @Override + protected Collection> nodePlugins() { + // Use the mock internal cluster info service, which has fake-able disk usages + return pluginList(MockInternalClusterInfoService.TestPlugin.class); + } + @Test 
//@TestLogging("org.elasticsearch.cluster:TRACE,org.elasticsearch.cluster.routing.allocation.decider:TRACE") public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { @@ -76,9 +82,9 @@ public class MockDiskUsagesIT extends ESIntegTestCase { // Start with all nodes at 50% usage final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); - cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", 100, 50)); - cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", 100, 50)); - cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", 100, 50)); + cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); + cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", "/dev/null", 100, 50)); + cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", "/dev/null", 100, 50)); client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, randomFrom("20b", "80%")) @@ -97,8 +103,8 @@ public class MockDiskUsagesIT extends ESIntegTestCase { @Override public void run() { ClusterInfo info = cis.getClusterInfo(); - logger.info("--> got: {} nodes", info.getNodeDiskUsages().size()); - assertThat(info.getNodeDiskUsages().size(), greaterThan(0)); + logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size()); + assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0)); } }); @@ -113,9 +119,9 @@ public class MockDiskUsagesIT extends ESIntegTestCase { } // Update the disk usages so one node has now passed the high watermark - cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", 100, 50)); - cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", 100, 50)); - cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", 100, 0)); // 
nothing free on node3 + cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", "_na_", 100, 50)); + cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); + cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 0)); // nothing free on node3 // Retrieve the count of shards on each node final Map nodesToShardCount = newHashMap(); @@ -138,9 +144,9 @@ public class MockDiskUsagesIT extends ESIntegTestCase { }); // Update the disk usages so one node is now back under the high watermark - cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", 100, 50)); - cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", 100, 50)); - cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", 100, 50)); // node3 has free space now + cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", "_na_", 100, 50)); + cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); + cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 50)); // node3 has free space now // Retrieve the count of shards on each node nodesToShardCount.clear(); @@ -166,7 +172,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase { /** Create a fake NodeStats for the given node and usage */ public static NodeStats makeStats(String nodeName, DiskUsage usage) { FsInfo.Path[] paths = new FsInfo.Path[1]; - FsInfo.Path path = new FsInfo.Path("/path.data", null, + FsInfo.Path path = new FsInfo.Path("/dev/null", null, usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes()); paths[0] = path; FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), paths); diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java index c4bd92e5084..b7670eaafe8 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java @@ -42,11 +42,8 @@ import static org.hamcrest.Matchers.nullValue; public class SettingsFilteringIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", SettingsFilteringPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(SettingsFilteringPlugin.class); } public static class SettingsFilteringPlugin extends Plugin { diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index c18abdc5bef..34f4a860e39 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -48,6 +48,7 @@ import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; @@ -82,7 +83,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { - return discoveryConfig.node(nodeOrdinal); + return discoveryConfig.nodeSettings(nodeOrdinal); } @Before @@ -134,9 +135,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { .put("transport.bind_host", "127.0.0.1") .put("transport.publish_host", "127.0.0.1") 
.put("gateway.local.list_timeout", "10s") // still long to induce failures but to long so test won't time out - .put("plugin.types", MockTransportService.TestPlugin.class.getName()) .build(); + @Override + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); + } + private void configureUnicastCluster(int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException { if (minimumMasterNode < 0) { minimumMasterNode = numberOfNodes / 2 + 1; diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java index f11f2ea0f27..ec7d81b0409 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java @@ -43,7 +43,7 @@ public class ZenUnicastDiscoveryIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { - return discoveryConfig.node(nodeOrdinal); + return discoveryConfig.nodeSettings(nodeOrdinal); } @Before diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index cc293375a2c..55204365688 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -42,7 +42,11 @@ import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.BytesTransportRequest; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportException; +import 
org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; import org.junit.Test; @@ -112,7 +116,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { createIndex("test"); ensureSearchable("test"); RecoveryResponse r = client().admin().indices().prepareRecoveries("test").get(); - int numRecoveriesBeforeNewMaster = r.shardResponses().get("test").size(); + int numRecoveriesBeforeNewMaster = r.shardRecoveryStates().get("test").size(); final String oldMaster = internalCluster().getMasterName(); internalCluster().stopCurrentMasterNode(); @@ -127,7 +131,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { ensureSearchable("test"); r = client().admin().indices().prepareRecoveries("test").get(); - int numRecoveriesAfterNewMaster = r.shardResponses().get("test").size(); + int numRecoveriesAfterNewMaster = r.shardRecoveryStates().get("test").size(); assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster)); } diff --git a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java index f1c6d476eb5..529b60562bf 100644 --- a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java +++ b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java @@ -35,7 +35,10 @@ import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; /** */ @@ -146,7 +149,7 @@ public class ShardInfoIT extends ESIntegTestCase { RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("idx") .setActiveOnly(true) .get(); - 
assertThat(recoveryResponse.shardResponses().get("idx").size(), equalTo(0)); + assertThat(recoveryResponse.shardRecoveryStates().get("idx").size(), equalTo(0)); } }); } diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java index 745c98f664e..fbd8b973fad 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.gateway; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -99,8 +98,7 @@ public class RecoveryBackwardsCompatibilityIT extends ESBackcompatTestCase { HashMap map = new HashMap<>(); map.put("details", "true"); final ToXContent.Params params = new ToXContent.MapParams(map); - for (ShardRecoveryResponse response : recoveryResponse.shardResponses().get("test")) { - RecoveryState recoveryState = response.recoveryState(); + for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { final String recoverStateAsJSON = XContentHelper.toString(recoveryState, params); if (!recoveryState.getPrimary()) { RecoveryState.Index index = recoveryState.getIndex(); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index aa229305a2f..341139ba88b 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import 
org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.client.Client; @@ -400,8 +399,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { assertSyncIdsNotNull(); } RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); - for (ShardRecoveryResponse response : recoveryResponse.shardResponses().get("test")) { - RecoveryState recoveryState = response.recoveryState(); + for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { long recovered = 0; for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) { if (file.name().startsWith("segments")) { @@ -410,7 +408,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { } if (!recoveryState.getPrimary() && (useSyncIds == false)) { logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", - response.getShardId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered)); assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0l)); @@ -422,7 +420,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { } else { if (useSyncIds && !recoveryState.getPrimary()) { logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}", - response.getShardId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getShardId().getId(), 
recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); } assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0l)); diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index 03100efde0a..3b955b271af 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShadowIndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.SnapshotState; @@ -52,6 +53,7 @@ import org.junit.Test; import java.io.IOException; import java.nio.file.Path; +import java.util.Collection; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -83,6 +85,11 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); + } + public void testCannotCreateWithBadPath() throws Exception { Settings nodeSettings = nodeSettings("/badpath"); internalCluster().startNodesAsync(1, nodeSettings).get(); @@ -419,7 +426,6 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { Path dataPath = createTempDir(); Settings nodeSettings = Settings.builder() .put("node.add_id_to_custom_path", false) - .put("plugin.types", MockTransportService.TestPlugin.class.getName()) .put("path.shared_data", dataPath) .build(); diff --git 
a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java index bdbcd45c7e4..c5cab481fae 100644 --- a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java +++ b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java @@ -33,12 +33,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.fd.FaultDetection; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportService; import org.junit.Test; +import java.util.Collection; import java.util.List; import static org.elasticsearch.cluster.routing.ShardRoutingState.*; @@ -57,9 +59,13 @@ public class TransportIndexFailuresIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_RETRIES, "1") // <-- for hitting simulated network failures quickly .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly .put("discovery.zen.minimum_master_nodes", 1) - .put("plugin.types", MockTransportService.TestPlugin.class.getName()) .build(); + @Override + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); + } + @Override protected int numberOfShards() { return 1; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 454f8e814d4..fccf642e9df 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -19,12 +19,9 @@ package org.elasticsearch.index.mapper; -import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.common.xcontent.json.JsonXContentParser; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; // TODO: make this a real unit test @@ -37,11 +34,12 @@ public class DocumentParserTests extends ESSingleNodeTestCase { DocumentMapper mapper = mapperParser.parse(mapping); BytesReference bytes = XContentFactory.jsonBuilder() - .startObject() + .startObject().startObject("foo") .field("field", "1234") - .endObject().bytes(); + .endObject().endObject().bytes(); ParsedDocument doc = mapper.parse("test", "type", "1", bytes); assertNull(doc.rootDoc().getField("field")); + assertNotNull(doc.rootDoc().getField(UidFieldMapper.NAME)); } public void testFieldDisabled() throws Exception { @@ -60,5 +58,6 @@ public class DocumentParserTests extends ESSingleNodeTestCase { ParsedDocument doc = mapper.parse("test", "type", "1", bytes); assertNull(doc.rootDoc().getField("foo")); assertNotNull(doc.rootDoc().getField("bar")); + assertNotNull(doc.rootDoc().getField(UidFieldMapper.NAME)); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java index 101b3d6b16b..42a9df6632e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java @@ -25,19 +25,20 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.util.Arrays; +import java.util.Collection; + import static org.hamcrest.Matchers.equalTo; public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", ExternalMapperPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(ExternalMapperPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index e4d345e80b0..1f0743abd96 100644 --- a/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -27,10 +27,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Before; import org.junit.Test; +import java.util.Collection; + import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -39,9 +42,8 @@ import static org.hamcrest.Matchers.instanceOf; public class CustomQueryParserIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - 
.put("plugin.types", DummyQueryParserPlugin.class.getName()).build(); + protected Collection> nodePlugins() { + return pluginList(DummyQueryParserPlugin.class); } @Before diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 0e997214033..ea72e4998b4 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -67,7 +68,10 @@ import java.util.HashSet; import java.util.Set; import java.util.concurrent.ExecutionException; -import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.EMPTY_PARAMS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -555,7 +559,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService("test"); IndexShard shard = test.shard(0); - ShardStats stats = new 
ShardStats(shard, new CommonStatsFlags()); + ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(shard, new CommonStatsFlags()), shard.commitStats()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath()); diff --git a/core/src/test/java/org/elasticsearch/index/shard/NewPathForShardTest.java b/core/src/test/java/org/elasticsearch/index/shard/NewPathForShardTest.java new file mode 100644 index 00000000000..d89c328d9dd --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/NewPathForShardTest.java @@ -0,0 +1,236 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.shard; + +import com.carrotsearch.randomizedtesting.annotations.Repeat; + +import org.apache.lucene.mockfile.FilterFileSystem; +import org.apache.lucene.mockfile.FilterFileSystemProvider; +import org.apache.lucene.mockfile.FilterPath; +import org.apache.lucene.util.Constants; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment.NodePath; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Field; +import java.nio.file.FileStore; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttributeView; +import java.nio.file.attribute.FileStoreAttributeView; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; + +/** Separate test class from ShardPathTests because we need static (BeforeClass) setup to install mock filesystems... 
*/ +@SuppressForbidden(reason = "ProviderMismatchException if I try to use PathUtils.getDefault instead") +public class NewPathForShardTest extends ESTestCase { + + // Sneakiness to install mock file stores so we can pretend how much free space we have on each path.data: + private static MockFileStore aFileStore = new MockFileStore("mocka"); + private static MockFileStore bFileStore = new MockFileStore("mockb"); + private static FileSystem origFileSystem; + private static String aPathPart = File.separator + 'a' + File.separator; + private static String bPathPart = File.separator + 'b' + File.separator; + + @BeforeClass + public static void installMockUsableSpaceFS() throws Exception { + // Necessary so when Environment.clinit runs, to gather all FileStores, it sees ours: + origFileSystem = FileSystems.getDefault(); + + Field field = PathUtils.class.getDeclaredField("DEFAULT"); + field.setAccessible(true); + FileSystem mock = new MockUsableSpaceFileSystemProvider().getFileSystem(getBaseTempDirForTestClass().toUri()); + field.set(null, mock); + assertEquals(mock, PathUtils.getDefaultFileSystem()); + } + + @AfterClass + public static void removeMockUsableSpaceFS() throws Exception { + Field field = PathUtils.class.getDeclaredField("DEFAULT"); + field.setAccessible(true); + field.set(null, origFileSystem); + origFileSystem = null; + aFileStore = null; + bFileStore = null; + } + + /** Mock file system that fakes usable space for each FileStore */ + @SuppressForbidden(reason = "ProviderMismatchException if I try to use PathUtils.getDefault instead") + static class MockUsableSpaceFileSystemProvider extends FilterFileSystemProvider { + + public MockUsableSpaceFileSystemProvider() { + super("mockusablespace://", FileSystems.getDefault()); + final List fileStores = new ArrayList<>(); + fileStores.add(aFileStore); + fileStores.add(bFileStore); + fileSystem = new FilterFileSystem(this, origFileSystem) { + @Override + public Iterable getFileStores() { + return fileStores; + } + 
}; + } + + @Override + public FileStore getFileStore(Path path) throws IOException { + if (path.toString().contains(aPathPart)) { + return aFileStore; + } else { + return bFileStore; + } + } + } + + static class MockFileStore extends FileStore { + + public long usableSpace; + + private final String desc; + + public MockFileStore(String desc) { + this.desc = desc; + } + + @Override + public String type() { + return "mock"; + } + + @Override + public String name() { + return desc; + } + + @Override + public String toString() { + return desc; + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public long getTotalSpace() throws IOException { + return usableSpace*3; + } + + @Override + public long getUsableSpace() throws IOException { + return usableSpace; + } + + @Override + public long getUnallocatedSpace() throws IOException { + return usableSpace*2; + } + + @Override + public boolean supportsFileAttributeView(Class type) { + return false; + } + + @Override + public boolean supportsFileAttributeView(String name) { + return false; + } + + @Override + public V getFileStoreAttributeView(Class type) { + return null; + } + + @Override + public Object getAttribute(String attribute) throws IOException { + return null; + } + } + + public void testSelectNewPathForShard() throws Exception { + assumeFalse("Consistenty fails on windows ('could not remove the following files')", Constants.WINDOWS); + Path path = PathUtils.get(createTempDir().toString()); + + // Use 2 data paths: + String[] paths = new String[] {path.resolve("a").toString(), + path.resolve("b").toString()}; + + Settings settings = Settings.builder() + .put("path.home", path) + .putArray("path.data", paths).build(); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings)); + + // Make sure all our mocking above actually worked: + NodePath[] nodePaths = nodeEnv.nodePaths(); + assertEquals(2, nodePaths.length); + + assertEquals("mocka", 
nodePaths[0].fileStore.name()); + assertEquals("mockb", nodePaths[1].fileStore.name()); + + // Path a has lots of free space, but b has little, so new shard should go to a: + aFileStore.usableSpace = 100000; + bFileStore.usableSpace = 1000; + + ShardId shardId = new ShardId("index", 0); + ShardPath result = ShardPath.selectNewPathForShard(nodeEnv, shardId, Settings.EMPTY, 100, Collections.emptyMap()); + assertTrue(result.getDataPath().toString().contains(aPathPart)); + + // Test the reverse: b has lots of free space, but a has little, so new shard should go to b: + aFileStore.usableSpace = 1000; + bFileStore.usableSpace = 100000; + + shardId = new ShardId("index", 0); + result = ShardPath.selectNewPathForShard(nodeEnv, shardId, Settings.EMPTY, 100, Collections.emptyMap()); + assertTrue(result.getDataPath().toString().contains(bPathPart)); + + // Now a and b have equal usable space; we allocate two shards to the node, and each should go to different paths: + aFileStore.usableSpace = 100000; + bFileStore.usableSpace = 100000; + + Map dataPathToShardCount = new HashMap<>(); + ShardPath result1 = ShardPath.selectNewPathForShard(nodeEnv, shardId, Settings.EMPTY, 100, dataPathToShardCount); + dataPathToShardCount.put(NodeEnvironment.shardStatePathToDataPath(result1.getDataPath()), 1); + ShardPath result2 = ShardPath.selectNewPathForShard(nodeEnv, shardId, Settings.EMPTY, 100, dataPathToShardCount); + + // #11122: this was the original failure: on a node with 2 disks that have nearly equal + // free space, we would always allocate all N incoming shards to the one path that + // had the most free space, never using the other drive unless new shards arrive + // after the first shards started using storage: + assertNotEquals(result1.getDataPath(), result2.getDataPath()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 6ca68c6f495..62d1b42d459 100644 ---
a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -70,6 +70,7 @@ import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -91,6 +92,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -117,7 +119,6 @@ public class CorruptedFileIT extends ESIntegTestCase { // we really need local GW here since this also checks for corruption etc. // and we need to make sure primaries are not just trashed if we don't have replicas .put(super.nodeSettings(nodeOrdinal)) - .extendArray("plugin.types", MockTransportService.TestPlugin.class.getName()) // speed up recoveries .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, 10) .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 10) @@ -125,6 +126,11 @@ public class CorruptedFileIT extends ESIntegTestCase { .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); + } + /** * Tests that we can actually recover from a corruption on the primary given that we have replica shards around. 
*/ diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index f0fda717f96..c5158b782e3 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.engine.MockEngineSupport; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -46,6 +47,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Set; import java.util.TreeSet; @@ -62,12 +64,10 @@ import static org.hamcrest.Matchers.notNullValue; public class CorruptedTranslogIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - // we really need local GW here since this also checks for corruption etc. - // and we need to make sure primaries are not just trashed if we don't have replicas - .put(super.nodeSettings(nodeOrdinal)) - .extendArray("plugin.types", MockTransportService.TestPlugin.class.getName()).build(); + protected Collection> nodePlugins() { + // we really need local GW here since this also checks for corruption etc. 
+ // and we need to make sure primaries are not just trashed if we don't have replicas + return pluginList(MockTransportService.TestPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 9fe2a0fd629..91d17f45038 100644 --- a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -36,6 +37,7 @@ import org.elasticsearch.transport.*; import org.junit.Test; import java.io.IOException; +import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -51,11 +53,8 @@ import static org.hamcrest.Matchers.greaterThan; public class ExceptionRetryIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .extendArray("plugin.types", MockTransportService.TestPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); } @Override diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java index c27a507d5c1..bc2b71eb3f1 100644 --- a/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java +++ 
b/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java @@ -24,11 +24,13 @@ import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESBackcompatTestCase; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; import java.lang.reflect.Field; +import java.util.Collection; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -44,11 +46,8 @@ import static org.hamcrest.Matchers.notNullValue; public class PreBuiltAnalyzerIntegrationIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", DummyAnalysisPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(DummyAnalysisPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 6ff35daf55a..234f3eada9a 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -45,6 +45,7 @@ import org.junit.Test; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Random; import java.util.concurrent.ExecutionException; @@ -57,6 +58,11 @@ import static org.hamcrest.Matchers.equalTo; */ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return pluginList(RandomExceptionDirectoryReaderWrapper.TestPlugin.class); + } + @Test public void 
testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException { for (NodeStats node : client().admin().cluster().prepareNodesStats() @@ -107,7 +113,6 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase { Settings.Builder settings = settingsBuilder() .put(indexSettings()) - .extendArray("plugin.types", RandomExceptionDirectoryReaderWrapper.TestPlugin.class.getName()) .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate) .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate) .put(MockEngineSupport.WRAP_READER_RATIO, 1.0d); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index c2860c75033..396d20e5915 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -47,17 +46,24 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState.Stage; import org.elasticsearch.indices.recovery.RecoveryState.Type; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import 
org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.store.MockFSDirectoryService; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; import org.junit.Test; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -67,7 +73,13 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.ESIntegTestCase.Scope; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; /** * @@ -85,6 +97,10 @@ public class IndexRecoveryIT extends ESIntegTestCase { private static final int SHARD_COUNT = 1; private static final int REPLICA_COUNT = 0; + @Override + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); + } private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, Type type, String sourceNode, String targetNode, boolean hasRestoreSource) { @@ -155,18 +171,17 @@ public class IndexRecoveryIT extends ESIntegTestCase { logger.info("--> request 
recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); - assertThat(response.shardResponses().size(), equalTo(SHARD_COUNT)); - assertThat(response.shardResponses().get(INDEX_NAME).size(), equalTo(1)); + assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT)); + assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); - List shardResponses = response.shardResponses().get(INDEX_NAME); - assertThat(shardResponses.size(), equalTo(1)); + List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); + assertThat(recoveryStates.size(), equalTo(1)); - ShardRecoveryResponse shardResponse = shardResponses.get(0); - RecoveryState state = shardResponse.recoveryState(); + RecoveryState recoveryState = recoveryStates.get(0); - assertRecoveryState(state, 0, Type.STORE, Stage.DONE, node, node, false); + assertRecoveryState(recoveryState, 0, Type.STORE, Stage.DONE, node, node, false); - validateIndexRecoveryState(state.getIndex()); + validateIndexRecoveryState(recoveryState.getIndex()); } @Test @@ -183,8 +198,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).setActiveOnly(true).execute().actionGet(); - List shardResponses = response.shardResponses().get(INDEX_NAME); - assertThat(shardResponses.size(), equalTo(0)); // Should not expect any responses back + List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); + assertThat(recoveryStates.size(), equalTo(0)); // Should not expect any responses back } @Test @@ -209,23 +224,23 @@ public class IndexRecoveryIT extends ESIntegTestCase { RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); // we should now have two total shards, one primary and one replica - List shardResponses = response.shardResponses().get(INDEX_NAME); - 
assertThat(shardResponses.size(), equalTo(2)); + List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); + assertThat(recoveryStates.size(), equalTo(2)); - List nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses); + List nodeAResponses = findRecoveriesForTargetNode(nodeA, recoveryStates); assertThat(nodeAResponses.size(), equalTo(1)); - List nodeBResponses = findRecoveriesForTargetNode(nodeB, shardResponses); + List nodeBResponses = findRecoveriesForTargetNode(nodeB, recoveryStates); assertThat(nodeBResponses.size(), equalTo(1)); // validate node A recovery - ShardRecoveryResponse nodeAShardResponse = nodeAResponses.get(0); - assertRecoveryState(nodeAShardResponse.recoveryState(), 0, Type.STORE, Stage.DONE, nodeA, nodeA, false); - validateIndexRecoveryState(nodeAShardResponse.recoveryState().getIndex()); + RecoveryState nodeARecoveryState = nodeAResponses.get(0); + assertRecoveryState(nodeARecoveryState, 0, Type.STORE, Stage.DONE, nodeA, nodeA, false); + validateIndexRecoveryState(nodeARecoveryState.getIndex()); // validate node B recovery - ShardRecoveryResponse nodeBShardResponse = nodeBResponses.get(0); - assertRecoveryState(nodeBShardResponse.recoveryState(), 0, Type.REPLICA, Stage.DONE, nodeA, nodeB, false); - validateIndexRecoveryState(nodeBShardResponse.recoveryState().getIndex()); + RecoveryState nodeBRecoveryState = nodeBResponses.get(0); + assertRecoveryState(nodeBRecoveryState, 0, Type.REPLICA, Stage.DONE, nodeA, nodeB, false); + validateIndexRecoveryState(nodeBRecoveryState.getIndex()); } @Test @@ -266,17 +281,17 @@ public class IndexRecoveryIT extends ESIntegTestCase { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); - List shardResponses = response.shardResponses().get(INDEX_NAME); - List nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses); - assertThat(nodeAResponses.size(), equalTo(1)); - List nodeBResponses 
= findRecoveriesForTargetNode(nodeB, shardResponses); - assertThat(nodeBResponses.size(), equalTo(1)); + List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); + List nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); + assertThat(nodeARecoveryStates.size(), equalTo(1)); + List nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates); + assertThat(nodeBRecoveryStates.size(), equalTo(1)); - assertRecoveryState(nodeAResponses.get(0).recoveryState(), 0, Type.STORE, Stage.DONE, nodeA, nodeA, false); - validateIndexRecoveryState(nodeAResponses.get(0).recoveryState().getIndex()); + assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.STORE, Stage.DONE, nodeA, nodeA, false); + validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex()); - assertOnGoingRecoveryState(nodeBResponses.get(0).recoveryState(), 0, Type.RELOCATION, nodeA, nodeB, false); - validateIndexRecoveryState(nodeBResponses.get(0).recoveryState().getIndex()); + assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, nodeA, nodeB, false); + validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex()); logger.info("--> request node recovery stats"); NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get(); @@ -325,11 +340,11 @@ public class IndexRecoveryIT extends ESIntegTestCase { response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); - shardResponses = response.shardResponses().get(INDEX_NAME); - assertThat(shardResponses.size(), equalTo(1)); + recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); + assertThat(recoveryStates.size(), equalTo(1)); - assertRecoveryState(shardResponses.get(0).recoveryState(), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); - validateIndexRecoveryState(shardResponses.get(0).recoveryState().getIndex()); + 
assertRecoveryState(recoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); + validateIndexRecoveryState(recoveryStates.get(0).getIndex()); statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get(); assertThat(statsResponse.getNodes(), arrayWithSize(2)); @@ -377,45 +392,45 @@ public class IndexRecoveryIT extends ESIntegTestCase { .execute().actionGet().getState(); response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); - shardResponses = response.shardResponses().get(INDEX_NAME); + recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); - nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses); - assertThat(nodeAResponses.size(), equalTo(1)); - nodeBResponses = findRecoveriesForTargetNode(nodeB, shardResponses); - assertThat(nodeBResponses.size(), equalTo(1)); - List nodeCResponses = findRecoveriesForTargetNode(nodeC, shardResponses); - assertThat(nodeCResponses.size(), equalTo(1)); + nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); + assertThat(nodeARecoveryStates.size(), equalTo(1)); + nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates); + assertThat(nodeBRecoveryStates.size(), equalTo(1)); + List nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates); + assertThat(nodeCRecoveryStates.size(), equalTo(1)); - assertRecoveryState(nodeAResponses.get(0).recoveryState(), 0, Type.REPLICA, Stage.DONE, nodeB, nodeA, false); - validateIndexRecoveryState(nodeAResponses.get(0).recoveryState().getIndex()); + assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeA, false); + validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex()); - assertRecoveryState(nodeBResponses.get(0).recoveryState(), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); - 
validateIndexRecoveryState(nodeBResponses.get(0).recoveryState().getIndex()); + assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); + validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex()); // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B) - assertOnGoingRecoveryState(nodeCResponses.get(0).recoveryState(), 0, Type.REPLICA, nodeB, nodeC, false); - validateIndexRecoveryState(nodeCResponses.get(0).recoveryState().getIndex()); + assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, Type.REPLICA, nodeB, nodeC, false); + validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex()); logger.info("--> speeding up recoveries"); restoreRecoverySpeed(); ensureGreen(); response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); - shardResponses = response.shardResponses().get(INDEX_NAME); + recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); - nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses); - assertThat(nodeAResponses.size(), equalTo(0)); - nodeBResponses = findRecoveriesForTargetNode(nodeB, shardResponses); - assertThat(nodeBResponses.size(), equalTo(1)); - nodeCResponses = findRecoveriesForTargetNode(nodeC, shardResponses); - assertThat(nodeCResponses.size(), equalTo(1)); + nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); + assertThat(nodeARecoveryStates.size(), equalTo(0)); + nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates); + assertThat(nodeBRecoveryStates.size(), equalTo(1)); + nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates); + assertThat(nodeCRecoveryStates.size(), equalTo(1)); - assertRecoveryState(nodeBResponses.get(0).recoveryState(), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); - validateIndexRecoveryState(nodeBResponses.get(0).recoveryState().getIndex()); + 
assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false); + validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex()); // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B) - assertRecoveryState(nodeCResponses.get(0).recoveryState(), 0, Type.REPLICA, Stage.DONE, nodeB, nodeC, false); - validateIndexRecoveryState(nodeCResponses.get(0).recoveryState().getIndex()); + assertRecoveryState(nodeCRecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeC, false); + validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex()); } @Test @@ -457,24 +472,24 @@ public class IndexRecoveryIT extends ESIntegTestCase { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); - for (Map.Entry> shardRecoveryResponse : response.shardResponses().entrySet()) { + for (Map.Entry> indexRecoveryStates : response.shardRecoveryStates().entrySet()) { - assertThat(shardRecoveryResponse.getKey(), equalTo(INDEX_NAME)); - List shardRecoveryResponses = shardRecoveryResponse.getValue(); - assertThat(shardRecoveryResponses.size(), equalTo(totalShards)); + assertThat(indexRecoveryStates.getKey(), equalTo(INDEX_NAME)); + List recoveryStates = indexRecoveryStates.getValue(); + assertThat(recoveryStates.size(), equalTo(totalShards)); - for (ShardRecoveryResponse shardResponse : shardRecoveryResponses) { - assertRecoveryState(shardResponse.recoveryState(), 0, Type.SNAPSHOT, Stage.DONE, null, nodeA, true); - validateIndexRecoveryState(shardResponse.recoveryState().getIndex()); + for (RecoveryState recoveryState : recoveryStates) { + assertRecoveryState(recoveryState, 0, Type.SNAPSHOT, Stage.DONE, null, nodeA, true); + validateIndexRecoveryState(recoveryState.getIndex()); } } } - private List findRecoveriesForTargetNode(String nodeName, List responses) { - List nodeResponses = new ArrayList<>(); - for 
(ShardRecoveryResponse response : responses) { - if (response.recoveryState().getTargetNode().getName().equals(nodeName)) { - nodeResponses.add(response); + private List findRecoveriesForTargetNode(String nodeName, List recoveryStates) { + List nodeResponses = new ArrayList<>(); + for (RecoveryState recoveryState : recoveryStates) { + if (recoveryState.getTargetNode().getName().equals(nodeName)) { + nodeResponses.add(recoveryState); } } return nodeResponses; @@ -519,7 +534,6 @@ public class IndexRecoveryIT extends ESIntegTestCase { final Settings nodeSettings = Settings.builder() .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, "100ms") .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, "1s") - .put("plugin.types", MockTransportService.TestPlugin.class.getName()) .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again .build(); // start a master node diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 898415152ca..a287bcb4f54 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoverySource; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.InternalTestCluster; @@ -63,6 +64,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.Future; @@ -87,10 +89,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // which is between 1 and 2 sec can cause each of the shard deletion requests to timeout. // to prevent this we are setting the timeout here to something highish ie. the default in practice .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS)) - .extendArray("plugin.types", MockTransportService.TestPlugin.class.getName()) .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); + } + @Override protected void ensureClusterStateConsistency() throws IOException { // testShardActiveElseWhere might change the state of a non-master node diff --git a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java index e441a952f21..f67e12095f3 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java @@ -33,6 +33,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.junit.Test; +import java.util.Collection; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; @@ -41,11 +43,8 @@ import static org.hamcrest.core.IsNull.notNullValue; public class IndexTemplateFilteringIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", TestPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(TestPlugin.class); } @Test diff --git 
a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java index 695845bca0d..3a00be0aeb9 100644 --- a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.indices.warmer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; - import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; @@ -49,7 +48,10 @@ import org.junit.Test; import java.util.Locale; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; public class SimpleIndicesWarmerIT extends ESIntegTestCase { @@ -272,7 +274,7 @@ public class SimpleIndicesWarmerIT extends ESIntegTestCase { for (IndexShardSegments indexShardSegments : indicesSegments) { for (ShardSegments shardSegments : indexShardSegments) { for (Segment segment : shardSegments) { - logger.debug("+=" + segment.memoryInBytes + " " + indexShardSegments.getShardId() + " " + shardSegments.getIndex()); + logger.debug("+=" + segment.memoryInBytes + " " + indexShardSegments.getShardId() + " " + shardSegments.getShardRouting().getIndex()); total += segment.memoryInBytes; } } diff --git a/core/src/test/java/org/elasticsearch/node/MockNode.java b/core/src/test/java/org/elasticsearch/node/MockNode.java new file mode 100644 index 00000000000..04762641f00 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/node/MockNode.java 
@@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.node; + +import org.elasticsearch.Version; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; + +import java.util.Collection; + +/** + * A node for testing which allows: + *
    + *
  • Overriding Version.CURRENT
  • + *
  • Adding test plugins that exist on the classpath
  • + *
+ */ +public class MockNode extends Node { + + // these are kept here so a copy of this MockNode can be created, since Node does not store them + private Version version; + private Collection> plugins; + + public MockNode(Settings settings, boolean loadConfigSettings, Version version, Collection> classpathPlugins) { + super(settings, loadConfigSettings, version, classpathPlugins); + this.version = version; + this.plugins = classpathPlugins; + } + + public Collection> getPlugins() { + return plugins; + } + + public Version getVersion() { + return version; + } +} diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index 86b1a6a4cc3..406cf68a98e 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -22,7 +22,7 @@ package org.elasticsearch.nodesinfo; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.plugins.PluginTestCase; +import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.junit.Test; @@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.*; * */ @ClusterScope(scope= Scope.TEST, numDataNodes =0) -public class SimpleNodesInfoIT extends PluginTestCase { +public class SimpleNodesInfoIT extends ESIntegTestCase { static final class Fields { static final String SITE_PLUGIN = "dummy"; diff --git a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java b/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java index bbeeac122f9..a6ac6bf1c69 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java +++ 
b/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.transport.*; import org.junit.Test; import java.io.IOException; +import java.util.Collection; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -51,16 +52,17 @@ public class PluggableTransportModuleIT extends ESIntegTestCase { return settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "local") - .put("plugin.types", CountingSentRequestsPlugin.class.getName()) .build(); } @Override - protected Settings transportClientSettings() { - return settingsBuilder() - .put("plugin.types", CountingSentRequestsPlugin.class.getName()) - .put(super.transportClientSettings()) - .build(); + protected Collection> nodePlugins() { + return pluginList(CountingSentRequestsPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return pluginList(CountingSentRequestsPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java index 6ee0fc8b8b8..cf44a7614dc 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java @@ -82,12 +82,12 @@ public class PluginManagerUnitTests extends ESTestCase { Iterator iterator = handle.urls().iterator(); if (supportStagingUrls) { - String expectedStagingURL = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", + String expectedStagingURL = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", Version.CURRENT.number(), Build.CURRENT.hashShort(), pluginName, Version.CURRENT.number(), pluginName, Version.CURRENT.number()); 
assertThat(iterator.next().toExternalForm(), is(expectedStagingURL)); } - URL expected = new URL("http", "download.elastic.co", "/elasticsearch/release/org/elasticsearch/plugin/" + pluginName + "/" + Version.CURRENT.number() + "/" + + URL expected = new URL("https", "download.elastic.co", "/elasticsearch/release/org/elasticsearch/plugin/" + pluginName + "/" + Version.CURRENT.number() + "/" + pluginName + "-" + Version.CURRENT.number() + ".zip"); assertThat(iterator.next().toExternalForm(), is(expected.toExternalForm())); @@ -108,12 +108,12 @@ public class PluginManagerUnitTests extends ESTestCase { Iterator iterator = handle.urls().iterator(); if (supportStagingUrls) { - String expectedStagingUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", + String expectedStagingUrl = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", Version.CURRENT.number(), Build.CURRENT.hashShort(), randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number()); assertThat(iterator.next().toExternalForm(), is(expectedStagingUrl)); } - String releaseUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", + String releaseUrl = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number()); assertThat(iterator.next().toExternalForm(), is(releaseUrl)); diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginTestCase.java b/core/src/test/java/org/elasticsearch/plugins/PluginTestCase.java deleted file mode 100644 index dffcba6e69f..00000000000 --- a/core/src/test/java/org/elasticsearch/plugins/PluginTestCase.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more 
contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugins; - -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESIntegTestCase; - -import java.net.URISyntaxException; -import java.net.URL; - -import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; - -/** - * Base class that lets you start a node with plugins. - */ -public abstract class PluginTestCase extends ESIntegTestCase { - - public String startNodeWithPlugins(Settings nodeSettings, String pluginDir, String ... 
pluginClassNames) throws URISyntaxException { - URL resource = getClass().getResource(pluginDir); - Settings.Builder settings = settingsBuilder(); - settings.put(nodeSettings); - if (resource != null) { - settings.put("path.plugins", getDataPath(pluginDir).toAbsolutePath()); - } - - if (pluginClassNames.length > 0) { - settings.putArray("plugin.types", pluginClassNames); - } - - String nodeName = internalCluster().startNode(settings); - - // We wait for a Green status - client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet(); - - return internalCluster().getInstance(ClusterService.class, nodeName).state().nodes().localNodeId(); - } -} diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 6ac34c2297e..c497c1ab6cc 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.test.ESTestCase; +import java.util.Arrays; + public class PluginsServiceTests extends ESTestCase { public static class AdditionalSettingsPlugin1 extends Plugin { @Override @@ -54,13 +56,16 @@ public class PluginsServiceTests extends ESTestCase { } } + static PluginsService newPluginsService(Settings settings, Class... 
classpathPlugins) { + return new PluginsService(settings, new Environment(settings), Arrays.asList(classpathPlugins)); + } + public void testAdditionalSettings() { Settings settings = Settings.builder() .put("path.home", createTempDir()) .put("my.setting", "test") - .put(IndexStoreModule.STORE_TYPE, IndexStoreModule.Type.SIMPLEFS.getSettingsKey()) - .putArray("plugin.types", AdditionalSettingsPlugin1.class.getName()).build(); - PluginsService service = new PluginsService(settings, new Environment(settings)); + .put(IndexStoreModule.STORE_TYPE, IndexStoreModule.Type.SIMPLEFS.getSettingsKey()).build(); + PluginsService service = newPluginsService(settings, AdditionalSettingsPlugin1.class); Settings newSettings = service.updatedSettings(); assertEquals("test", newSettings.get("my.setting")); // previous settings still exist assertEquals("1", newSettings.get("foo.bar")); // added setting exists @@ -69,9 +74,8 @@ public class PluginsServiceTests extends ESTestCase { public void testAdditionalSettingsClash() { Settings settings = Settings.builder() - .put("path.home", createTempDir()) - .putArray("plugin.types", AdditionalSettingsPlugin1.class.getName(), AdditionalSettingsPlugin2.class.getName()).build(); - PluginsService service = new PluginsService(settings, new Environment(settings)); + .put("path.home", createTempDir()).build(); + PluginsService service = newPluginsService(settings, AdditionalSettingsPlugin1.class, AdditionalSettingsPlugin2.class); try { service.updatedSettings(); fail("Expected exception when building updated settings"); diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java index b171278d659..d9580854c14 100644 --- a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java @@ -25,6 +25,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; 
import org.elasticsearch.test.rest.client.http.HttpResponse; import org.junit.Test; +import java.util.Collection; + import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED; import static org.elasticsearch.test.ESIntegTestCase.Scope; @@ -41,11 +43,15 @@ public class ResponseHeaderPluginIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", TestResponseHeaderPlugin.class.getName()) .put("force.http.enabled", true) .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(TestResponseHeaderPlugin.class); + } + @Test public void testThatSettingHeadersWorks() throws Exception { ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 60a6acb7ed9..450212b75b5 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -284,7 +284,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().get(); for (ShardStats shardStats : indicesStatsResponse.getShards()) { DocsStats docsStats = shardStats.getStats().docs; - logger.info("shard [{}] - count {}, primary {}", shardStats.getShardId(), docsStats.getCount(), shardStats.getShardRouting().primary()); + logger.info("shard [{}] - count {}, primary {}", shardStats.getShardRouting().id(), docsStats.getCount(), shardStats.getShardRouting().primary()); } //if there was an error we try to wait and see if at some point it'll get fixed diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 
39622287ea2..124f055b334 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -54,6 +54,7 @@ import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.BackgroundIndexer; @@ -63,7 +64,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -76,6 +76,7 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; @@ -100,12 +101,10 @@ public class RelocationIT extends ESIntegTestCase { private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES); @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put("plugin.types", MockTransportService.TestPlugin.class.getName()).build(); + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); } - @Test public void testSimpleRelocationNoIndexing() { logger.info("--> starting [node1] ..."); @@ -422,7 +421,7 @@ public class RelocationIT extends ESIntegTestCase { public boolean apply(Object 
input) { RecoveryResponse recoveryResponse = internalCluster().client(redNodeName).admin().indices().prepareRecoveries(indexName) .get(); - return !recoveryResponse.shardResponses().get(indexName).isEmpty(); + return !recoveryResponse.shardRecoveryStates().get(indexName).isEmpty(); } } ); diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 943a1485100..25347fa1fab 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.*; @@ -41,6 +42,7 @@ import org.junit.Test; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -55,14 +57,19 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; @SuppressCodecs("*") // test relies on exact file extensions public class TruncatedRecoveryIT extends ESIntegTestCase { + @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .extendArray("plugin.types", MockTransportService.TestPlugin.class.getName()) .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)); return builder.build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); + } + 
/** * This test tries to truncate some of larger files in the index to trigger leftovers on the recovery * target. This happens during recovery when the last chunk of the file is transferred to the replica diff --git a/core/src/test/java/org/elasticsearch/script/CustomScriptContextIT.java b/core/src/test/java/org/elasticsearch/script/CustomScriptContextIT.java index efac975fcc3..8f4a71dce08 100644 --- a/core/src/test/java/org/elasticsearch/script/CustomScriptContextIT.java +++ b/core/src/test/java/org/elasticsearch/script/CustomScriptContextIT.java @@ -28,6 +28,8 @@ import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.util.Collection; + import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.notNullValue; @@ -40,12 +42,16 @@ public class CustomScriptContextIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", CustomScriptContextPlugin.class.getName()) .put("script." + PLUGIN_NAME + "_custom_globally_disabled_op", "off") .put("script.engine.expression.inline." 
+ PLUGIN_NAME + "_custom_exp_disabled_op", "off") .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(CustomScriptContextPlugin.class); + } + @Test public void testCustomScriptContextsSettings() { ScriptService scriptService = internalCluster().getInstance(ScriptService.class); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java b/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java index c9707f7bdbb..d3f0923b61c 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import java.util.Collection; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -40,8 +41,8 @@ import static org.hamcrest.Matchers.equalTo; public class ScriptFieldIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("plugin.types", CustomScriptPlugin.class.getName()).build(); + protected Collection> nodePlugins() { + return pluginList(CustomScriptPlugin.class); } static int[] intArray = { Integer.MAX_VALUE, Integer.MIN_VALUE, 3 }; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index bb0340f193a..0461edef6b4 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -93,11 +93,15 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { public Settings nodeSettings(int nodeOrdinal) { 
return settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", CustomSignificanceHeuristicPlugin.class.getName()) .put("path.conf", this.getDataPath("config")) .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(CustomSignificanceHeuristicPlugin.class); + } + public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java index 76e24467049..556bf136c30 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java @@ -24,199 +24,37 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.util.English; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Requests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.engine.MockEngineSupport; import 
org.elasticsearch.test.engine.MockEngineSupportModule; import org.elasticsearch.test.engine.ThrowingLeafReaderWrapper; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.store.MockFSDirectoryService; -import org.junit.Test; import java.io.IOException; +import java.util.Collection; import java.util.Random; import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; public class SearchWithRandomExceptionsIT extends ESIntegTestCase { - @Test - @TestLogging("action.search.type:TRACE,index.shard:TRACE") - public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException { - String mapping = XContentFactory.jsonBuilder(). - startObject(). - startObject("type"). - startObject("properties"). - startObject("test") - .field("type", "string") - .field("index", "not_analyzed") - .endObject(). - endObject(). 
- endObject() - .endObject().string(); - final double exceptionRate; - final double exceptionOnOpenRate; - if (frequently()) { - if (randomBoolean()) { - if (randomBoolean()) { - exceptionOnOpenRate = 1.0 / between(5, 100); - exceptionRate = 0.0d; - } else { - exceptionRate = 1.0 / between(5, 100); - exceptionOnOpenRate = 0.0d; - } - } else { - exceptionOnOpenRate = 1.0 / between(5, 100); - exceptionRate = 1.0 / between(5, 100); - } - } else { - // rarely no exception - exceptionRate = 0d; - exceptionOnOpenRate = 0d; - } - final boolean createIndexWithoutErrors = randomBoolean(); - int numInitialDocs = 0; - - if (createIndexWithoutErrors) { - Builder settings = settingsBuilder() - .put("index.number_of_replicas", numberOfReplicas()); - logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); - client().admin().indices().prepareCreate("test") - .setSettings(settings) - .addMapping("type", mapping).execute().actionGet(); - numInitialDocs = between(10, 100); - ensureGreen(); - for (int i = 0; i < numInitialDocs; i++) { - client().prepareIndex("test", "type", "init" + i).setSource("test", "init").get(); - } - client().admin().indices().prepareRefresh("test").execute().get(); - client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute().get(); - client().admin().indices().prepareClose("test").execute().get(); - client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)); - client().admin().indices().prepareOpen("test").execute().get(); - } else { - Builder settings = settingsBuilder() - .put("index.number_of_replicas", randomIntBetween(0, 1)) - .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 
exceptionOnOpenRate); // we cannot expect that the index will be valid - logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); - client().admin().indices().prepareCreate("test") - .setSettings(settings) - .addMapping("type", mapping).execute().actionGet(); - } - ClusterHealthResponse clusterHealthResponse = client().admin().cluster() - .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here - final int numDocs; - final boolean expectAllShardsFailed; - if (clusterHealthResponse.isTimedOut()) { - /* some seeds just won't let you create the index at all and we enter a ping-pong mode - * trying one node after another etc. that is ok but we need to make sure we don't wait - * forever when indexing documents so we set numDocs = 1 and expecte all shards to fail - * when we search below.*/ - logger.info("ClusterHealth timed out - only index one doc and expect searches to fail"); - numDocs = 1; - expectAllShardsFailed = true; - } else { - numDocs = between(10, 100); - expectAllShardsFailed = false; - } - int numCreated = 0; - boolean[] added = new boolean[numDocs]; - for (int i = 0; i < numDocs; i++) { - added[i] = false; - try { - IndexResponse indexResponse = client().prepareIndex("test", "type", Integer.toString(i)).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get(); - if (indexResponse.isCreated()) { - numCreated++; - added[i] = true; - } - } catch (ElasticsearchException ex) { - } - - } - NumShards numShards = getNumShards("test"); - logger.info("Start Refresh"); - final RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here - final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; - logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], 
successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards()); - final int numSearches = scaledRandomIntBetween(10, 20); - // we don't check anything here really just making sure we don't leave any open files or a broken index behind. - for (int i = 0; i < numSearches; i++) { - try { - int docToQuery = between(0, numDocs - 1); - int expectedResults = added[docToQuery] ? 1 : 0; - logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); - SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) - .setSize(expectedResults).get(); - logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); - if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) { - assertResultsAndLogOnFailure(expectedResults, searchResponse); - } - // check match all - searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchAllQuery()) - .setSize(numCreated + numInitialDocs).addSort("_uid", SortOrder.ASC).get(); - logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); - if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) { - assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse); - } - } catch (SearchPhaseExecutionException ex) { - logger.info("SearchPhaseException: [{}]", ex.getMessage()); - // if a scheduled refresh or flush fails all shards we see all shards failed here - if (!(expectAllShardsFailed || refreshResponse.getSuccessfulShards() == 0 || ex.getMessage().contains("all shards failed"))) { - throw ex; - } - } - } - - if (createIndexWithoutErrors) { - // check the index still contains the 
records that we indexed without errors - client().admin().indices().prepareClose("test").execute().get(); - client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, 0) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0)); - client().admin().indices().prepareOpen("test").execute().get(); - ensureGreen(); - SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, numInitialDocs); - } + @Override + protected Collection> nodePlugins() { + return pluginList(RandomExceptionDirectoryReaderWrapper.TestPlugin.class); } - private void assertResultsAndLogOnFailure(long expectedResults, SearchResponse searchResponse) { - if (searchResponse.getHits().getTotalHits() != expectedResults) { - StringBuilder sb = new StringBuilder("search result contains ["); - sb.append(searchResponse.getHits().getTotalHits()).append("] results. expected [").append(expectedResults).append("]"); - String failMsg = sb.toString(); - for (SearchHit hit : searchResponse.getHits().getHits()) { - sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType()) - .append("] id [").append(hit.id()).append("]"); - } - logger.warn(sb.toString()); - fail(failMsg); - } - } - - @Test public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException { String mapping = XContentFactory.jsonBuilder(). startObject(). 
@@ -252,10 +90,9 @@ public class SearchWithRandomExceptionsIT extends ESIntegTestCase { Builder settings = settingsBuilder() .put(indexSettings()) - .extendArray("plugin.types", RandomExceptionDirectoryReaderWrapper.TestPlugin.class.getName()) .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate) .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate) - .put(MockEngineSupport.WRAP_READER_RATIO, 1.0d); + .put(MockEngineSupport.WRAP_READER_RATIO, 1.0d); logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); assertAcked(prepareCreate("test") .setSettings(settings) diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java new file mode 100644 index 00000000000..457f63d54e5 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -0,0 +1,191 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.basic; + +import org.apache.lucene.util.English; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.store.MockFSDirectoryService; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; + +public class SearchWithRandomIOExceptionsIT extends ESIntegTestCase { + + @TestLogging("action.search.type:TRACE,index.shard:TRACE") + public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException { + String mapping = XContentFactory.jsonBuilder(). + startObject(). + startObject("type"). + startObject("properties"). + startObject("test") + .field("type", "string") + .field("index", "not_analyzed") + .endObject(). + endObject(). 
+ endObject() + .endObject().string(); + final double exceptionRate; + final double exceptionOnOpenRate; + if (frequently()) { + if (randomBoolean()) { + if (randomBoolean()) { + exceptionOnOpenRate = 1.0 / between(5, 100); + exceptionRate = 0.0d; + } else { + exceptionRate = 1.0 / between(5, 100); + exceptionOnOpenRate = 0.0d; + } + } else { + exceptionOnOpenRate = 1.0 / between(5, 100); + exceptionRate = 1.0 / between(5, 100); + } + } else { + // rarely no exception + exceptionRate = 0d; + exceptionOnOpenRate = 0d; + } + final boolean createIndexWithoutErrors = randomBoolean(); + int numInitialDocs = 0; + + if (createIndexWithoutErrors) { + Settings.Builder settings = settingsBuilder() + .put("index.number_of_replicas", numberOfReplicas()); + logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); + client().admin().indices().prepareCreate("test") + .setSettings(settings) + .addMapping("type", mapping).execute().actionGet(); + numInitialDocs = between(10, 100); + ensureGreen(); + for (int i = 0; i < numInitialDocs; i++) { + client().prepareIndex("test", "type", "init" + i).setSource("test", "init").get(); + } + client().admin().indices().prepareRefresh("test").execute().get(); + client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute().get(); + client().admin().indices().prepareClose("test").execute().get(); + client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)); + client().admin().indices().prepareOpen("test").execute().get(); + } else { + Settings.Builder settings = settingsBuilder() + .put("index.number_of_replicas", randomIntBetween(0, 1)) + .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) + 
.put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate); // we cannot expect that the index will be valid + logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); + client().admin().indices().prepareCreate("test") + .setSettings(settings) + .addMapping("type", mapping).execute().actionGet(); + } + ClusterHealthResponse clusterHealthResponse = client().admin().cluster() + .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here + final int numDocs; + final boolean expectAllShardsFailed; + if (clusterHealthResponse.isTimedOut()) { + /* some seeds just won't let you create the index at all and we enter a ping-pong mode + * trying one node after another etc. that is ok but we need to make sure we don't wait + * forever when indexing documents so we set numDocs = 1 and expect all shards to fail + * when we search below.*/ + logger.info("ClusterHealth timed out - only index one doc and expect searches to fail"); + numDocs = 1; + expectAllShardsFailed = true; + } else { + numDocs = between(10, 100); + expectAllShardsFailed = false; + } + int numCreated = 0; + boolean[] added = new boolean[numDocs]; + for (int i = 0; i < numDocs; i++) { + added[i] = false; + try { + IndexResponse indexResponse = client().prepareIndex("test", "type", Integer.toString(i)).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get(); + if (indexResponse.isCreated()) { + numCreated++; + added[i] = true; + } + } catch (ElasticsearchException ex) { + } + + } + ESIntegTestCase.NumShards numShards = getNumShards("test"); + logger.info("Start Refresh"); + final RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here + final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; + 
logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards()); + final int numSearches = scaledRandomIntBetween(10, 20); + // we don't check anything here really just making sure we don't leave any open files or a broken index behind. + for (int i = 0; i < numSearches; i++) { + try { + int docToQuery = between(0, numDocs - 1); + int expectedResults = added[docToQuery] ? 1 : 0; + logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); + SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) + .setSize(expectedResults).get(); + logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); + if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) { + assertResultsAndLogOnFailure(expectedResults, searchResponse); + } + // check match all + searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchAllQuery()) + .setSize(numCreated + numInitialDocs).addSort("_uid", SortOrder.ASC).get(); + logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); + if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) { + assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse); + } + } catch (SearchPhaseExecutionException ex) { + logger.info("SearchPhaseException: [{}]", ex.getMessage()); + // if a scheduled refresh or flush fails all shards we see all shards failed here + if (!(expectAllShardsFailed || refreshResponse.getSuccessfulShards() == 0 || ex.getMessage().contains("all shards failed"))) { + throw ex; + } + } + 
} + + if (createIndexWithoutErrors) { + // check the index still contains the records that we indexed without errors + client().admin().indices().prepareClose("test").execute().get(); + client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, 0) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0)); + client().admin().indices().prepareOpen("test").execute().get(); + ensureGreen(); + SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, numInitialDocs); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index 58ec58bfdf3..ca98047a590 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -43,6 +43,7 @@ import org.junit.Test; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -59,11 +60,8 @@ import static org.hamcrest.Matchers.equalTo; public class FetchSubPhasePluginIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", FetchTermVectorsPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(FetchTermVectorsPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 44b7ea75494..ad9c0af267b 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java 
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.AbstractDoubleSearchScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ExplainableSearchScript; @@ -42,6 +43,7 @@ import org.junit.Test; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -60,11 +62,8 @@ import static org.hamcrest.Matchers.equalTo; public class ExplainableScriptIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", ExplainableScriptPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(ExplainableScriptPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index d1dd06d8d82..946fb593bd1 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -37,6 +37,8 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.junit.Test; +import java.util.Collection; + import static org.elasticsearch.client.Requests.indexRequest; import static org.elasticsearch.client.Requests.searchRequest; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -53,11 +55,8 @@ import static 
org.hamcrest.Matchers.equalTo; public class FunctionScorePluginIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", CustomDistanceScorePlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(CustomDistanceScorePlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchIT.java index e254b4a0808..152a66218e2 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchIT.java @@ -22,6 +22,7 @@ import com.google.common.collect.Maps; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -29,6 +30,7 @@ import org.junit.Before; import org.junit.Test; import java.io.IOException; +import java.util.Collection; import java.util.Map; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -42,11 +44,8 @@ import static org.hamcrest.Matchers.equalTo; public class CustomHighlighterSearchIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", CustomHighlighterPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(CustomHighlighterPlugin.class); } @Before diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java 
b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java index 90fcee206ed..9e51d259b91 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java @@ -23,10 +23,12 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.junit.Test; +import java.util.Collection; import java.util.List; import java.util.Locale; @@ -42,8 +44,8 @@ import static org.hamcrest.Matchers.is; public class CustomSuggesterSearchIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("plugin.types", CustomSuggesterPlugin.class.getName()).build(); + protected Collection> nodePlugins() { + return pluginList(CustomSuggesterPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 4e30e3ca770..8000fc6d9d9 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.snapshots.mockstore.MockRepository; import 
org.elasticsearch.test.ESIntegTestCase; @@ -43,6 +44,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collection; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -60,7 +62,12 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { // Rebalancing is causing some checks after restore to randomly fail // due to https://github.com/elastic/elasticsearch/issues/9421 .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) - .extendArray("plugin.types", MockRepository.Plugin.class.getName()).build(); + .build(); + } + + @Override + protected Collection> nodePlugins() { + return pluginList(MockRepository.Plugin.class); } public static long getFailureCount(String repository) { diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index d36ecfa7d46..3454cd2561b 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -32,7 +32,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.AbstractDiffable; @@ -55,7 +54,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; 
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.ttl.IndicesTTLService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; @@ -71,6 +72,7 @@ import org.junit.Test; import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collection; import java.util.EnumSet; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -81,8 +83,18 @@ import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.ESIntegTestCase.ClusterScope; import static org.elasticsearch.test.ESIntegTestCase.Scope; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; -import static org.hamcrest.Matchers.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; /** */ @@ -90,6 +102,11 @@ import static org.hamcrest.Matchers.*; @ESIntegTestCase.SuppressLocalMode // TODO only restorePersistentSettingsTest needs this maybe factor out? 
public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { + @Override + protected Collection> nodePlugins() { + return pluginList(MockRepository.Plugin.class); + } + @Test public void restorePersistentSettingsTest() throws Exception { logger.info("--> start 2 nodes"); @@ -592,9 +609,9 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest assertThat(client().prepareCount("test-idx").get().getCount(), equalTo(100L)); IntSet reusedShards = new IntHashSet(); - for (ShardRecoveryResponse response : client().admin().indices().prepareRecoveries("test-idx").get().shardResponses().get("test-idx")) { - if (response.recoveryState().getIndex().reusedBytes() > 0) { - reusedShards.add(response.getShardId()); + for (RecoveryState recoveryState : client().admin().indices().prepareRecoveries("test-idx").get().shardRecoveryStates().get("test-idx")) { + if (recoveryState.getIndex().reusedBytes() > 0) { + reusedShards.add(recoveryState.getShardId().getId()); } } logger.info("--> check that at least half of the shards had some reuse: [{}]", reusedShards); @@ -605,7 +622,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest @Test public void registrationFailureTest() { logger.info("--> start first node"); - internalCluster().startNode(settingsBuilder().put("plugin.types", MockRepository.Plugin.class.getName())); + internalCluster().startNode(); logger.info("--> start second node"); // Make sure the first node is elected as master internalCluster().startNode(settingsBuilder().put("node.master", false)); @@ -624,7 +641,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest @Test public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception { - Settings nodeSettings = settingsBuilder().put("plugin.types", MockRepository.Plugin.class.getName()).build(); + Settings nodeSettings = settingsBuilder().put().build(); logger.info("--> start two nodes"); 
internalCluster().startNodesAsync(2, nodeSettings).get(); // Register mock repositories diff --git a/core/src/test/java/org/elasticsearch/test/ESBackcompatTestCase.java b/core/src/test/java/org/elasticsearch/test/ESBackcompatTestCase.java index 39780db1026..47e163a6291 100644 --- a/core/src/test/java/org/elasticsearch/test/ESBackcompatTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/ESBackcompatTestCase.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.listeners.LoggingListener; import org.elasticsearch.transport.Transport; @@ -42,6 +43,8 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; import java.util.Map; import java.util.Random; @@ -182,14 +185,14 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { @Override protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { TestCluster cluster = super.buildTestCluster(scope, seed); - ExternalNode externalNode = new ExternalNode(backwardsCompatibilityPath(), randomLong(), new SettingsSource() { + ExternalNode externalNode = new ExternalNode(backwardsCompatibilityPath(), randomLong(), new NodeConfigurationSource() { @Override - public Settings node(int nodeOrdinal) { + public Settings nodeSettings(int nodeOrdinal) { return externalNodeSettings(nodeOrdinal); } @Override - public Settings transportClient() { + public Settings transportClientSettings() { return transportClientSettings(); } }); diff --git a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java 
b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java index 55fae7fc3fd..5327f4e6325 100644 --- a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java @@ -27,6 +27,13 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.google.common.base.Joiner; import com.google.common.base.Predicate; import org.apache.http.impl.client.HttpClients; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -112,7 +119,9 @@ import org.elasticsearch.indices.flush.IndicesSyncedFlushResult; import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.client.RandomizingClient; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; @@ -855,6 +864,21 @@ public abstract class ESIntegTestCase extends ESTestCase { } } + /** Ensures the result counts are as expected, and logs the results if different */ + public void assertResultsAndLogOnFailure(long expectedResults, SearchResponse searchResponse) { + if (searchResponse.getHits().getTotalHits() != expectedResults) { + StringBuilder sb = new StringBuilder("search result contains ["); + 
sb.append(searchResponse.getHits().getTotalHits()).append("] results. expected [").append(expectedResults).append("]"); + String failMsg = sb.toString(); + for (SearchHit hit : searchResponse.getHits().getHits()) { + sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType()) + .append("] id [").append(hit.id()).append("]"); + } + logger.warn(sb.toString()); + fail(failMsg); + } + } + /** * Restricts the given index to be allocated on n nodes using the allocation deciders. * Yet if the shards can't be allocated on any other node shards for this index will remain allocated on @@ -1671,6 +1695,25 @@ public abstract class ESIntegTestCase extends ESTestCase { return builder.build(); } + /** + * Returns a collection of plugins that should be loaded on each node. + */ + protected Collection> nodePlugins() { + return Collections.emptyList(); + } + + /** + * Returns a collection of plugins that should be loaded when creating a transport client. + */ + protected Collection> transportClientPlugins() { + return Collections.emptyList(); + } + + /** Helper method to create list of plugins without specifying generic types. */ + protected static Collection> pluginList(Class... plugins) { + return Arrays.asList(plugins); + } + /** * This method is used to obtain additional settings for clients created by the internal cluster. * These settings will be applied on the client in addition to some randomized settings defined in @@ -1723,16 +1766,23 @@ public abstract class ESIntegTestCase extends ESTestCase { default: throw new ElasticsearchException("Scope not supported: " + scope); } - SettingsSource settingsSource = new SettingsSource() { + NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { @Override - public Settings node(int nodeOrdinal) { + public Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(Node.HTTP_ENABLED, false). 
- put(nodeSettings(nodeOrdinal)).build(); + put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } - @Override - public Settings transportClient() { - return transportClientSettings(); + public Collection> nodePlugins() { + return ESIntegTestCase.this.nodePlugins(); + } + @Override + public Settings transportClientSettings() { + return ESIntegTestCase.this.transportClientSettings(); + } + @Override + public Collection> transportClientPlugins() { + return ESIntegTestCase.this.transportClientPlugins(); } }; @@ -1757,7 +1807,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } return new InternalTestCluster(nodeMode, seed, createTempDir(), minNumDataNodes, maxNumDataNodes, - InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", settingsSource, getNumClientNodes(), + InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix); } diff --git a/core/src/test/java/org/elasticsearch/test/ExternalNode.java b/core/src/test/java/org/elasticsearch/test/ExternalNode.java index 0d969db234b..88e5cf29ad0 100644 --- a/core/src/test/java/org/elasticsearch/test/ExternalNode.java +++ b/core/src/test/java/org/elasticsearch/test/ExternalNode.java @@ -60,7 +60,7 @@ final class ExternalNode implements Closeable { private final Path path; private final Random random; - private final SettingsSource settingsSource; + private final NodeConfigurationSource nodeConfigurationSource; private Process process; private NodeInfo nodeInfo; private final String clusterName; @@ -70,23 +70,23 @@ final class ExternalNode implements Closeable { private Settings externalNodeSettings; - ExternalNode(Path path, long seed, SettingsSource settingsSource) { - this(path, null, seed, settingsSource); + ExternalNode(Path path, long seed, NodeConfigurationSource nodeConfigurationSource) { + this(path, null, seed, nodeConfigurationSource); } - ExternalNode(Path path, 
String clusterName, long seed, SettingsSource settingsSource) { + ExternalNode(Path path, String clusterName, long seed, NodeConfigurationSource nodeConfigurationSource) { if (!Files.isDirectory(path)) { throw new IllegalArgumentException("path must be a directory"); } this.path = path; this.clusterName = clusterName; this.random = new Random(seed); - this.settingsSource = settingsSource; + this.nodeConfigurationSource = nodeConfigurationSource; } synchronized ExternalNode start(Client localNode, Settings defaultSettings, String nodeName, String clusterName, int nodeOrdinal) throws IOException, InterruptedException { - ExternalNode externalNode = new ExternalNode(path, clusterName, random.nextLong(), settingsSource); - Settings settings = Settings.builder().put(defaultSettings).put(settingsSource.node(nodeOrdinal)).build(); + ExternalNode externalNode = new ExternalNode(path, clusterName, random.nextLong(), nodeConfigurationSource); + Settings settings = Settings.builder().put(defaultSettings).put(nodeConfigurationSource.nodeSettings(nodeOrdinal)).build(); externalNode.startInternal(localNode, settings, nodeName, clusterName); return externalNode; } diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 217e8da3340..c6e81f5f80d 100644 --- a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -87,10 +87,12 @@ import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; +import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.node.internal.InternalSettingsPreparer; import 
org.elasticsearch.node.service.NodeService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchService; @@ -129,7 +131,6 @@ import java.util.concurrent.atomic.AtomicInteger; import static junit.framework.Assert.fail; import static org.apache.lucene.util.LuceneTestCase.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.node.NodeBuilder.nodeBuilder; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.hamcrest.Matchers.*; @@ -150,7 +151,7 @@ public final class InternalTestCluster extends TestCluster { private final ESLogger logger = Loggers.getLogger(getClass()); - static SettingsSource DEFAULT_SETTINGS_SOURCE = SettingsSource.EMPTY; + static NodeConfigurationSource DEFAULT_SETTINGS_SOURCE = NodeConfigurationSource.EMPTY; /** * A boolean value to enable or disable mock modules. This is useful to test the @@ -171,8 +172,25 @@ public final class InternalTestCluster extends TestCluster { */ public static final int PORTS_PER_JVM = 100; + /** + * The number of ports in the range used for this cluster + */ + public static final int PORTS_PER_CLUSTER = 20; + + private static final int GLOBAL_TRANSPORT_BASE_PORT = 9300; + private static final int GLOBAL_HTTP_BASE_PORT = 19200; + private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0")); - public static final int BASE_PORT = 9300 + PORTS_PER_JVM * (JVM_ORDINAL + 1); + + /** a per-JVM unique offset to be used for calculating unique port ranges. 
*/ + public static final int JVM_BASE_PORT_OFFEST = PORTS_PER_JVM * (JVM_ORDINAL + 1); + + private static final AtomicInteger clusterOrdinal = new AtomicInteger(); + private final int CLUSTER_BASE_PORT_OFFSET = JVM_BASE_PORT_OFFEST + (clusterOrdinal.getAndIncrement() * PORTS_PER_CLUSTER) % PORTS_PER_JVM; + + public final int TRANSPORT_BASE_PORT = GLOBAL_TRANSPORT_BASE_PORT + CLUSTER_BASE_PORT_OFFSET; + public final int HTTP_BASE_PORT = GLOBAL_HTTP_BASE_PORT + CLUSTER_BASE_PORT_OFFSET; + private static final boolean ENABLE_MOCK_MODULES = RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true); @@ -207,7 +225,7 @@ public final class InternalTestCluster extends TestCluster { private final int numSharedClientNodes; - private final SettingsSource settingsSource; + private final NodeConfigurationSource nodeConfigurationSource; private final ExecutorService executor; @@ -221,7 +239,7 @@ public final class InternalTestCluster extends TestCluster { private String nodeMode; public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir, - int minNumDataNodes, int maxNumDataNodes, String clusterName, SettingsSource settingsSource, int numClientNodes, + int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes, boolean enableHttpPipelining, String nodePrefix) { super(clusterSeed); if ("network".equals(nodeMode) == false && "local".equals(nodeMode) == false) { @@ -272,7 +290,7 @@ public final class InternalTestCluster extends TestCluster { } logger.info("Setup InternalTestCluster [{}] with seed [{}] using [{}] data nodes and [{}] client nodes", clusterName, SeedUtils.formatSeed(clusterSeed), numSharedDataNodes, numSharedClientNodes); - this.settingsSource = settingsSource; + this.nodeConfigurationSource = nodeConfigurationSource; Builder builder = Settings.settingsBuilder(); if (random.nextInt(5) == 0) { // sometimes set this // randomize (multi/single) data path, special case 
for 0, don't set it at all... @@ -288,8 +306,8 @@ public final class InternalTestCluster extends TestCluster { builder.put("path.shared_data", baseDir.resolve("custom")); builder.put("path.home", baseDir); builder.put("path.repo", baseDir.resolve("repos")); - builder.put("transport.tcp.port", BASE_PORT + "-" + (BASE_PORT + 100)); - builder.put("http.port", BASE_PORT + 101 + "-" + (BASE_PORT + 200)); + builder.put("transport.tcp.port", TRANSPORT_BASE_PORT + "-" + (TRANSPORT_BASE_PORT + PORTS_PER_CLUSTER)); + builder.put("http.port", HTTP_BASE_PORT + "-" + (HTTP_BASE_PORT + PORTS_PER_CLUSTER)); builder.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true); builder.put("node.mode", nodeMode); builder.put("http.pipelining", enableHttpPipelining); @@ -352,7 +370,7 @@ public final class InternalTestCluster extends TestCluster { private Settings getSettings(int nodeOrdinal, long nodeSeed, Settings others) { Builder builder = Settings.settingsBuilder().put(defaultSettings) .put(getRandomNodeSettings(nodeSeed)); - Settings settings = settingsSource.node(nodeOrdinal); + Settings settings = nodeConfigurationSource.nodeSettings(nodeOrdinal); if (settings != null) { if (settings.get(ClusterName.SETTING) != null) { throw new IllegalStateException("Tests must not set a '" + ClusterName.SETTING + "' as a node setting set '" + ClusterName.SETTING + "': [" + settings.get(ClusterName.SETTING) + "]"); @@ -366,21 +384,27 @@ public final class InternalTestCluster extends TestCluster { return builder.build(); } + private Collection> getPlugins(long seed) { + Set> plugins = new HashSet<>(nodeConfigurationSource.nodePlugins()); + Random random = new Random(seed); + if (ENABLE_MOCK_MODULES && usually(random)) { + plugins.add(MockTransportService.TestPlugin.class); + plugins.add(MockFSIndexStore.TestPlugin.class); + plugins.add(NodeMocksPlugin.class); + plugins.add(MockEngineFactoryPlugin.class); + plugins.add(MockSearchService.TestPlugin.class); + } + if 
(isLocalTransportConfigured()) { + plugins.add(AssertingLocalTransport.TestPlugin.class); + } + return plugins; + } + private Settings getRandomNodeSettings(long seed) { Random random = new Random(seed); Builder builder = Settings.settingsBuilder() .put(SETTING_CLUSTER_NODE_SEED, seed); - if (ENABLE_MOCK_MODULES && usually(random)) { - builder.extendArray("plugin.types", - MockTransportService.TestPlugin.class.getName(), - MockFSIndexStore.TestPlugin.class.getName(), - NodeMocksPlugin.class.getName(), - MockEngineFactoryPlugin.class.getName(), - MockSearchService.TestPlugin.class.getName()); - } - if (isLocalTransportConfigured()) { - builder.extendArray("plugin.types", AssertingLocalTransport.TestPlugin.class.getName()); - } else { + if (isLocalTransportConfigured() == false) { builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, rarely(random)); } if (random.nextBoolean()) { @@ -609,6 +633,7 @@ public final class InternalTestCluster extends TestCluster { assert Thread.holdsLock(this); ensureOpen(); settings = getSettings(nodeId, seed, settings); + Collection> plugins = getPlugins(seed); String name = buildNodeName(nodeId); assert !nodes.containsKey(name); Settings finalSettings = settingsBuilder() @@ -616,9 +641,8 @@ public final class InternalTestCluster extends TestCluster { .put(settings) .put("name", name) .put("discovery.id.seed", seed) - .put("tests.mock.version", version) .build(); - Node node = nodeBuilder().settings(finalSettings).build(); + MockNode node = new MockNode(finalSettings, true, version, plugins); return new NodeAndClient(name, node); } @@ -775,13 +799,13 @@ public final class InternalTestCluster extends TestCluster { } private final class NodeAndClient implements Closeable { - private Node node; + private MockNode node; private Client nodeClient; private Client transportClient; private final AtomicBoolean closed = new AtomicBoolean(false); private final String name; - NodeAndClient(String name, Node node) { + NodeAndClient(String 
name, MockNode node) { this.node = node; this.name = name; } @@ -836,7 +860,7 @@ public final class InternalTestCluster extends TestCluster { /* no sniff client for now - doesn't work will all tests since it might throw NoNodeAvailableException if nodes are shut down. * we first need support of transportClientRatio as annotations or so */ - return transportClient = new TransportClientFactory(false, settingsSource.transportClient(), baseDir, nodeMode).client(node, clusterName); + return transportClient = new TransportClientFactory(false, nodeConfigurationSource.transportClientSettings(), baseDir, nodeMode, nodeConfigurationSource.transportClientPlugins()).client(node, clusterName); } void resetClient() throws IOException { @@ -868,7 +892,11 @@ public final class InternalTestCluster extends TestCluster { IOUtils.rm(nodeEnv.nodeDataPaths()); } } - node = nodeBuilder().settings(node.settings()).settings(newSettings).node(); + Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).build(); + Collection> plugins = node.getPlugins(); + Version version = node.getVersion(); + node = new MockNode(finalSettings, true, version, plugins); + node.start(); } void registerDataPath() { @@ -894,12 +922,14 @@ public final class InternalTestCluster extends TestCluster { private final Settings settings; private final Path baseDir; private final String nodeMode; + private final Collection> plugins; - TransportClientFactory(boolean sniff, Settings settings, Path baseDir, String nodeMode) { + TransportClientFactory(boolean sniff, Settings settings, Path baseDir, String nodeMode, Collection> plugins) { this.sniff = sniff; this.settings = settings != null ? 
settings : Settings.EMPTY; this.baseDir = baseDir; this.nodeMode = nodeMode; + this.plugins = plugins; } public Client client(Node node, String clusterName) { @@ -917,7 +947,11 @@ public final class InternalTestCluster extends TestCluster { .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) .put(settings); - TransportClient client = TransportClient.builder().settings(builder.build()).build(); + TransportClient.Builder clientBuilder = TransportClient.builder().settings(builder.build()); + for (Class plugin : plugins) { + clientBuilder.addPlugin(plugin); + } + TransportClient client = clientBuilder.build(); client.addTransportAddress(addr); return client; } diff --git a/core/src/test/java/org/elasticsearch/test/SettingsSource.java b/core/src/test/java/org/elasticsearch/test/NodeConfigurationSource.java similarity index 57% rename from core/src/test/java/org/elasticsearch/test/SettingsSource.java rename to core/src/test/java/org/elasticsearch/test/NodeConfigurationSource.java index 6341d842d67..e04e840e525 100644 --- a/core/src/test/java/org/elasticsearch/test/SettingsSource.java +++ b/core/src/test/java/org/elasticsearch/test/NodeConfigurationSource.java @@ -19,17 +19,21 @@ package org.elasticsearch.test; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; -public abstract class SettingsSource { +import java.util.Collection; +import java.util.Collections; - public static final SettingsSource EMPTY = new SettingsSource() { +public abstract class NodeConfigurationSource { + + public static final NodeConfigurationSource EMPTY = new NodeConfigurationSource() { @Override - public Settings node(int nodeOrdinal) { + public Settings nodeSettings(int nodeOrdinal) { return null; } @Override - public Settings transportClient() { + public Settings transportClientSettings() { return null; } }; @@ -37,8 +41,18 @@ public abstract class SettingsSource { /** * @return the settings for the node represented by the given 
ordinal, or {@code null} if there are no settings defined */ - public abstract Settings node(int nodeOrdinal); + public abstract Settings nodeSettings(int nodeOrdinal); - public abstract Settings transportClient(); + /** Returns plugins that should be loaded on the node */ + public Collection> nodePlugins() { + return Collections.emptyList(); + } + + public abstract Settings transportClientSettings(); + + /** Returns plugins that should be loaded in the transport client */ + public Collection> transportClientPlugins() { + return Collections.emptyList(); + } } diff --git a/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index b7d0c2dd930..bcd9384a88b 100644 --- a/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -25,18 +25,15 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.SettingsSource; -import org.elasticsearch.transport.local.LocalTransport; +import org.elasticsearch.test.NodeConfigurationSource; import java.io.IOException; -import java.net.Inet4Address; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.util.HashSet; import java.util.Set; -public class ClusterDiscoveryConfiguration extends SettingsSource { +public class ClusterDiscoveryConfiguration extends NodeConfigurationSource { static Settings DEFAULT_NODE_SETTINGS = Settings.settingsBuilder().put("discovery.type", "zen").build(); private static final String IP_ADDR = "127.0.0.1"; @@ -52,12 +49,12 @@ public class ClusterDiscoveryConfiguration extends SettingsSource { } @Override - public Settings node(int 
nodeOrdinal) { + public Settings nodeSettings(int nodeOrdinal) { return nodeSettings; } @Override - public Settings transportClient() { + public Settings transportClientSettings() { return transportClientSettings; } @@ -103,11 +100,11 @@ public class ClusterDiscoveryConfiguration extends SettingsSource { } private static int calcBasePort() { - return 30000 + InternalTestCluster.BASE_PORT; + return 30000 + InternalTestCluster.JVM_BASE_PORT_OFFEST; } @Override - public Settings node(int nodeOrdinal) { + public Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder(); String[] unicastHosts = new String[unicastHostOrdinals.length]; @@ -125,7 +122,7 @@ public class ClusterDiscoveryConfiguration extends SettingsSource { } } builder.putArray("discovery.zen.ping.unicast.hosts", unicastHosts); - return builder.put(super.node(nodeOrdinal)).build(); + return builder.put(super.nodeSettings(nodeOrdinal)).build(); } @SuppressForbidden(reason = "we know we pass a IP address") diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java index e0cbfa5ed90..fc2b9469a73 100644 --- a/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java +++ b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java @@ -21,22 +21,20 @@ package org.elasticsearch.test.disruption; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportModule; import org.junit.Test; import java.io.IOException; +import java.util.Collection; public class NetworkPartitionIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - 
.put("plugin.types", MockTransportService.TestPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(MockTransportService.TestPlugin.class); } @Test diff --git a/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index cf5c00e1575..2770e3581d2 100644 --- a/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -26,13 +26,11 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.SettingsSource; +import org.elasticsearch.test.NodeConfigurationSource; import java.io.IOException; import java.nio.file.Path; -import java.util.Iterator; -import java.util.Map; -import java.util.Random; +import java.util.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; @@ -49,34 +47,48 @@ public class InternalTestClusterTests extends ESTestCase { int minNumDataNodes = randomIntBetween(0, 9); int maxNumDataNodes = randomIntBetween(minNumDataNodes, 10); String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); - SettingsSource settingsSource = SettingsSource.EMPTY; + NodeConfigurationSource nodeConfigurationSource = NodeConfigurationSource.EMPTY; int numClientNodes = randomIntBetween(0, 10); boolean enableHttpPipelining = randomBoolean(); String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); Path baseDir = createTempDir(); - InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix); - InternalTestCluster cluster1 = new InternalTestCluster("local", 
clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix); - assertClusters(cluster0, cluster1, true); + InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); + InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); + // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way + assertClusters(cluster0, cluster1, false); } - public static void assertClusters(InternalTestCluster cluster0, InternalTestCluster cluster1, boolean assertClusterName) { + /** + * a set of settings that are expected to have different values betweem clusters, even they have been initialized with the same + * base settins. 
+ */ + final static Set clusterUniqueSettings = new HashSet<>(); + + static { + clusterUniqueSettings.add(ClusterName.SETTING); + clusterUniqueSettings.add("transport.tcp.port"); + clusterUniqueSettings.add("http.port"); + clusterUniqueSettings.add("http.port"); + } + + public static void assertClusters(InternalTestCluster cluster0, InternalTestCluster cluster1, boolean checkClusterUniqueSettings) { Settings defaultSettings0 = cluster0.getDefaultSettings(); Settings defaultSettings1 = cluster1.getDefaultSettings(); - assertSettings(defaultSettings0, defaultSettings1, assertClusterName); + assertSettings(defaultSettings0, defaultSettings1, checkClusterUniqueSettings); assertThat(cluster0.numDataNodes(), equalTo(cluster1.numDataNodes())); - if (assertClusterName) { + if (checkClusterUniqueSettings) { assertThat(cluster0.getClusterName(), equalTo(cluster1.getClusterName())); } } - public static void assertSettings(Settings left, Settings right, boolean compareClusterName) { + public static void assertSettings(Settings left, Settings right, boolean checkClusterUniqueSettings) { ImmutableSet> entries0 = left.getAsMap().entrySet(); Map entries1 = right.getAsMap(); assertThat(entries0.size(), equalTo(entries1.size())); for (Map.Entry entry : entries0) { - if(entry.getKey().equals(ClusterName.SETTING) && compareClusterName == false) { + if (clusterUniqueSettings.contains(entry.getKey()) && checkClusterUniqueSettings == false) { continue; } assertThat(entries1, hasEntry(entry.getKey(), entry.getValue())); @@ -92,15 +104,15 @@ public class InternalTestClusterTests extends ESTestCase { /*while (clusterName.equals(clusterName1)) { clusterName1 = clusterName("shared", Integer.toString(CHILD_JVM_ID), clusterSeed); // spin until the time changes }*/ - SettingsSource settingsSource = SettingsSource.EMPTY; + NodeConfigurationSource nodeConfigurationSource = NodeConfigurationSource.EMPTY; int numClientNodes = randomIntBetween(0, 2); boolean enableHttpPipelining = randomBoolean(); 
int jvmOrdinal = randomIntBetween(0, 10); String nodePrefix = "foobar"; Path baseDir = createTempDir(); - InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix); - InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix); + InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); + InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); assertClusters(cluster0, cluster1, false); long seed = randomLong(); diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index 44b117aa4f2..3abca9be066 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -99,12 +99,16 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", ActionLoggingPlugin.class.getName()) .put("script.indexed", "on") .put(HTTP_ENABLED, true) .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(ActionLoggingPlugin.class); + } + @Before public void createIndices() throws Exception { String mapping = jsonBuilder().startObject().startObject("type") diff --git 
a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java index e26998cc0a6..28701248817 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java @@ -47,6 +47,7 @@ import org.junit.Test; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.Collection; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -67,7 +68,12 @@ public class NettyTransportIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) .put("node.mode", "network") - .extendArray("plugin.types", ExceptionThrowingNettyTransport.TestPlugin.class.getName()).build(); + .put(TransportModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); + } + + @Override + protected Collection> nodePlugins() { + return pluginList(ExceptionThrowingNettyTransport.TestPlugin.class); } @Test @@ -99,10 +105,6 @@ public class NettyTransportIT extends ESIntegTestCase { public void onModule(TransportModule transportModule) { transportModule.addTransport("exception-throwing", ExceptionThrowingNettyTransport.class); } - @Override - public Settings additionalSettings() { - return Settings.builder().put(TransportModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); - } } @Inject diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java index 11e5feed23e..2e11bdd3475 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java @@ -36,6 +36,7 @@ import 
org.elasticsearch.test.junit.rule.RepeatOnExceptionRule; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.TransportService; +import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -52,16 +53,30 @@ import static org.hamcrest.Matchers.is; public class NettyTransportMultiPortTests extends ESTestCase { private static final int MAX_RETRIES = 10; + private String host; @Rule public RepeatOnExceptionRule repeatOnBindExceptionRule = new RepeatOnExceptionRule(logger, MAX_RETRIES, BindTransportException.class); + @Before + public void setup() { + if (randomBoolean()) { + host = "localhost"; + } else { + if (NetworkUtils.SUPPORTS_V6 && randomBoolean()) { + host = "::1"; + } else { + host = "127.0.0.1"; + } + } + } + @Test public void testThatNettyCanBindToMultiplePorts() throws Exception { int[] ports = getRandomPorts(3); Settings settings = settingsBuilder() - .put("network.host", "127.0.0.1") + .put("network.host", host) .put("transport.tcp.port", ports[0]) .put("transport.profiles.default.port", ports[1]) .put("transport.profiles.client1.port", ports[2]) @@ -82,7 +97,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { int[] ports = getRandomPorts(2); Settings settings = settingsBuilder() - .put("network.host", "127.0.0.1") + .put("network.host", host) .put("transport.tcp.port", ports[0]) .put("transport.profiles.client1.port", ports[1]) .build(); @@ -101,7 +116,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { int[] ports = getRandomPorts(1); Settings settings = settingsBuilder() - .put("network.host", "127.0.0.1") + .put("network.host", host) .put("transport.tcp.port", ports[0]) .put("transport.profiles.client1.whatever", "foo") .build(); @@ -119,7 +134,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { int[] ports = getRandomPorts(3); Settings settings = settingsBuilder() - .put("network.host", "127.0.0.1") + 
.put("network.host", host) .put("transport.tcp.port", ports[0]) .put("transport.netty.port", ports[1]) .put("transport.profiles.default.port", ports[2]) @@ -140,7 +155,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { int[] ports = getRandomPorts(3); Settings settings = settingsBuilder() - .put("network.host", "127.0.0.1") + .put("network.host", host) .put("transport.tcp.port", ports[0]) // mimics someone trying to define a profile for .local which is the profile for a node request to itself .put("transport.profiles." + TransportService.DIRECT_RESPONSE_PROFILE + ".port", ports[1]) @@ -199,7 +214,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { private void assertConnectionRefused(int port) throws Exception { try { - trySocketConnection(new InetSocketTransportAddress(InetAddress.getByName("localhost"), port).address()); + trySocketConnection(new InetSocketTransportAddress(InetAddress.getByName(host), port).address()); fail("Expected to get exception when connecting to port " + port); } catch (IOException e) { // expected @@ -208,7 +223,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { } private void assertPortIsBound(int port) throws Exception { - assertPortIsBound("localhost", port); + assertPortIsBound(host, port); } private void assertPortIsBound(String host, int port) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 4847d2b528e..a8478aef051 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -41,7 +41,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.SettingsSource; +import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.test.TestCluster; 
import org.junit.After; import org.junit.AfterClass; @@ -50,7 +50,6 @@ import org.junit.Test; import java.io.IOException; import java.util.ArrayList; -import java.util.List; import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -76,20 +75,8 @@ public class TribeIT extends ESIntegTestCase { @BeforeClass public static void setupSecondCluster() throws Exception { ESIntegTestCase.beforeClass(); - SettingsSource source = new SettingsSource() { - @Override - public Settings node(int nodeOrdinal) { - final int base = InternalTestCluster.BASE_PORT + 1000; - return Settings.builder().put("transport.tcp.port", base + "-" + (base + 100)).build(); - } - - @Override - public Settings transportClient() { - return node(0); - } - }; - // create another cluster - cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, Strings.randomBase64UUID(getRandom()), source, 0, false, SECOND_CLUSTER_NODE_PREFIX); + cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, + Strings.randomBase64UUID(getRandom()), NodeConfigurationSource.EMPTY, 0, false, SECOND_CLUSTER_NODE_PREFIX); cluster2.beforeTest(getRandom(), 0.1); cluster2.ensureAtLeastNumDataNodes(2); diff --git a/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java b/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java index 2e2edb64bbb..8b5ec861a51 100644 --- a/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.junit.Test; +import java.util.Collection; import java.util.Map; import static org.hamcrest.Matchers.hasKey; @@ -46,11 +47,8 @@ import static org.hamcrest.Matchers.is; public class 
UpdateByNativeScriptIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .extendArray("plugin.types", CustomNativeScriptFactory.TestPlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(CustomNativeScriptFactory.TestPlugin.class); } @Test diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index 538b09ecfc1..2ce05fdb1f9 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -43,20 +43,13 @@ instance, see https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/pom.xml[`plugins/site-example/pom.xml`]. [float] -=== Loading plugins from the classpath +=== Testing your plugin When testing a Java plugin, it will only be auto-loaded if it is in the -`plugins/` directory. If, instead, it is in your classpath, you can tell -Elasticsearch to load it with the `plugin.types` setting: +`plugins/` directory. Use `bin/plugin install file://path/to/your/plugin` +to install your plugin for testing. -[source,java] --------------------------- -settingsBuilder() - .put("cluster.name", cluster) - .put("path.home", getHome()) - .put("plugin.types", MyCustomPlugin.class.getName()) <1> - .build(); --------------------------- -<1> Tells Elasticsearch to load your plugin. +You may also load your plugin within the test framework for integration tests. +Read more in {ref}/integration-tests.html#changing-node-configuration[Changing Node Configuration]. diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 4d9578624c1..e69cfb43782 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -860,7 +860,7 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' In the previous section, we skipped over a little detail called the document score (`_score` field in the search results). 
The score is a numeric value that is a relative measure of how well the document matches the search query that we specified. The higher the score, the more relevant the document is, the lower the score, the less relevant the document is. -But queries do not always to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. +But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. To understand filters, let's first introduce the <>, which allows you to combine a query (like `match_all`, `match`, `bool`, etc.) together with another query which is only used for filtering. As an example, let's introduce the <>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. diff --git a/docs/reference/migration/migrate_2_0/mapping.asciidoc b/docs/reference/migration/migrate_2_0/mapping.asciidoc index edeb243a9ca..0f3b94ea076 100644 --- a/docs/reference/migration/migrate_2_0/mapping.asciidoc +++ b/docs/reference/migration/migrate_2_0/mapping.asciidoc @@ -164,6 +164,12 @@ fields in differnt types, this warning has been relaxed: type names may now contain dots, but they may not *begin* with a dot. The only exception to this is the special `.percolator` type. +==== Type names may not be longer than 255 characters + +Mapping type names may not be longer than 255 characters. Long type names +will continue to function on indices created before upgrade, but it will not +be possible to create types with long names in new indices. 
+ ==== Types may no longer be deleted In 1.x it was possible to delete a type mapping, along with all of the @@ -364,6 +370,16 @@ Along with this change, the following settings have ben removed: * `index.mapper.default_mapping_location` * `index.mapper.default_percolator_mapping_location` +==== Fielddata formats + +Now that doc values are the default for fielddata, specialized in-memory +formats have become an esoteric option. These fielddata formats have been removed: + +* `fst` on string fields +* `compressed` on geo points + +The default fielddata format will be used instead. + ==== Posting and doc-values codecs It is no longer possible to specify per-field postings and doc values formats @@ -395,4 +411,4 @@ The default `position_increment_gap` is now 100. Indexes created in Elasticsearc to use the old default of 0. This was done to prevent phrase queries from matching across different values of the same term unexpectedly. Specifically, 100 was chosen to cause phrase queries with slops up to 99 to match only within -a single value of a field. \ No newline at end of file +a single value of a field. diff --git a/docs/reference/migration/migrate_2_0/settings.asciidoc b/docs/reference/migration/migrate_2_0/settings.asciidoc index b11fb0c0a9f..f5898a4ea19 100644 --- a/docs/reference/migration/migrate_2_0/settings.asciidoc +++ b/docs/reference/migration/migrate_2_0/settings.asciidoc @@ -38,6 +38,28 @@ PUT _settings In 2.0, the above request will throw an exception. Instead the refresh interval should be set to `"1s"` for one second. +==== Merge and merge throttling settings + +The tiered merge policy is now the only supported merge policy. 
These settings +have been removed: + +* `index.merge.policy.type` +* `index.merge.policy.min_merge_size` +* `index.merge.policy.max_merge_size` +* `index.merge.policy.merge_factor` +* `index.merge.policy.max_merge_docs` +* `index.merge.policy.calibrate_size_by_deletes` +* `index.merge.policy.min_merge_docs` +* `index.merge.policy.max_merge_docs` + +Merge throttling now uses a feedback loop to auto-throttle. These settings +have been removed: + +* `indices.store.throttle.type` +* `indices.store.throttle.max_bytes_per_sec` +* `index.store.throttle.type` +* `index.store.throttle.max_bytes_per_sec` + ==== Shadow replica settings The `node.enable_custom_paths` setting has been removed and replaced by the diff --git a/docs/reference/migration/migrate_2_0/stats.asciidoc b/docs/reference/migration/migrate_2_0/stats.asciidoc index 84635f5713e..b75246e4f11 100644 --- a/docs/reference/migration/migrate_2_0/stats.asciidoc +++ b/docs/reference/migration/migrate_2_0/stats.asciidoc @@ -45,16 +45,6 @@ used separately to control whether `routing_nodes` should be returned. The deprecated index status API has been removed. -==== `cat` APIs verbose by default - -The `cat` APIs now default to being verbose, which means they output column -headers by default. Verbosity can be turned off with the `v` parameter: - -[source,sh] ------------------ -GET _cat/shards?v=0 ------------------ - ==== Nodes Stats API Queue lengths are now reported as basic numeric so they can easily processed by code. Before we used a human diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc index aa73c582284..ecfd168c144 100644 --- a/docs/reference/testing/testing-framework.asciidoc +++ b/docs/reference/testing/testing-framework.asciidoc @@ -147,15 +147,13 @@ The above sample configures the test to use a new cluster for each test method. 
[[changing-node-configuration]] ==== Changing node configuration -As elasticsearch is using JUnit 4, using the `@Before` and `@After` annotations is not a problem. However you should keep in mind, that this does not have any effect in your cluster setup, as the cluster is already up and running when those methods are run. So in case you want to configure settings - like loading a plugin on node startup - before the node is actually running, you should overwrite the `nodeSettings()` method from the `ElasticsearchIntegrationTest` class and change the cluster scope to `SUITE`. +As elasticsearch is using JUnit 4, using the `@Before` and `@After` annotations is not a problem. However you should keep in mind, that this does not have any effect in your cluster setup, as the cluster is already up and running when those methods are run. So in case you want to configure settings - like loading a plugin on node startup - before the node is actually running, you should overwrite the `nodePlugins()` method from the `ESIntegTestCase` class and return the plugin classes each node should load. 
[source,java] ----------------------------------------- @Override -protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder() - .put("plugin.types", CustomSuggesterPlugin.class.getName()) - .put(super.nodeSettings(nodeOrdinal)).build(); +protected Collection> nodePlugins() { + return pluginList(CustomSuggesterPlugin.class); } ----------------------------------------- diff --git a/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTest.java b/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTest.java index a44a0f46b3a..b66cd943b11 100644 --- a/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTest.java +++ b/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTest.java @@ -24,11 +24,13 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; import org.junit.After; import org.junit.Before; +import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -75,7 +77,6 @@ public abstract class AbstractAwsTest extends ESIntegTestCase { Settings.Builder settings = Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put("path.home", createTempDir()) - .extendArray("plugin.types", CloudAwsPlugin.class.getName(), TestAwsS3Service.TestPlugin.class.getName()) .put("cloud.aws.test.random", randomInt()) .put("cloud.aws.test.write_failures", 0.1) .put("cloud.aws.test.read_failures", 0.1); @@ -92,4 +93,9 @@ public abstract class AbstractAwsTest extends ESIntegTestCase { } return settings.build(); } + + @Override + protected Collection> nodePlugins() { + return pluginList(CloudAwsPlugin.class, TestAwsS3Service.TestPlugin.class); + } } diff --git 
a/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryITest.java b/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryITest.java index 463131522a2..9af9e4df62c 100644 --- a/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryITest.java +++ b/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryITest.java @@ -23,10 +23,13 @@ package org.elasticsearch.discovery.ec2; import org.elasticsearch.cloud.aws.AbstractAwsTest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.junit.Test; +import java.util.Collection; + import static org.elasticsearch.common.settings.Settings.settingsBuilder; /** @@ -37,10 +40,14 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) public class Ec2DiscoveryITest extends AbstractAwsTest { + @Override + protected Collection> nodePlugins() { + return pluginList(CloudAwsPlugin.class); + } + @Test public void testStart() { Settings nodeSettings = settingsBuilder() - .put("plugin.types", CloudAwsPlugin.class.getName()) .put("cloud.enabled", true) .put("discovery.type", "ec2") .build(); diff --git a/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsITest.java b/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsITest.java index 98576118206..7dbe7647da3 100644 --- a/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsITest.java +++ b/plugins/cloud-aws/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsITest.java @@ -24,10 +24,13 @@ import 
org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResp import org.elasticsearch.cloud.aws.AbstractAwsTest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.junit.Test; +import java.util.Collection; + import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.CoreMatchers.is; @@ -39,10 +42,14 @@ import static org.hamcrest.CoreMatchers.is; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) public class Ec2DiscoveryUpdateSettingsITest extends AbstractAwsTest { + @Override + protected Collection> nodePlugins() { + return pluginList(CloudAwsPlugin.class); + } + @Test public void testMinimumMasterNodesStart() { Settings nodeSettings = settingsBuilder() - .put("plugin.types", CloudAwsPlugin.class.getName()) .put("cloud.enabled", true) .put("discovery.type", "ec2") .build(); diff --git a/plugins/cloud-aws/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/cloud-aws/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index 23441d5f509..393224105c0 100644 --- a/plugins/cloud-aws/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/cloud-aws/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -34,6 +34,7 @@ import org.elasticsearch.cloud.aws.AwsS3Service; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import 
org.elasticsearch.snapshots.SnapshotMissingException; @@ -46,6 +47,7 @@ import org.junit.Before; import org.junit.Test; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import static org.hamcrest.Matchers.*; @@ -63,11 +65,15 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest { .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false) .put("cloud.enabled", true) - .put("plugin.types", CloudAwsPlugin.class.getName()) .put("repositories.s3.base_path", basePath) .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(CloudAwsPlugin.class); + } + private String basePath; @Before diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureComputeServiceTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureComputeServiceTest.java index 2dc5add0a3a..8df4df21470 100644 --- a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureComputeServiceTest.java +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureComputeServiceTest.java @@ -24,13 +24,16 @@ import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery; import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import java.util.Collection; + public abstract class AbstractAzureComputeServiceTest extends ESIntegTestCase { - private String mockPlugin; + private Class mockPlugin; - public AbstractAzureComputeServiceTest(String mockPlugin) { + public AbstractAzureComputeServiceTest(Class mockPlugin) { // We want to inject the Azure API Mock this.mockPlugin = mockPlugin; } @@ -41,8 +44,7 @@ public abstract class 
AbstractAzureComputeServiceTest extends ESIntegTestCase { .put(super.nodeSettings(nodeOrdinal)) .put("discovery.type", "azure") // We need the network to make the mock working - .put("node.mode", "network") - .extendArray("plugin.types", CloudAzurePlugin.class.getName(), mockPlugin); + .put("node.mode", "network"); // We add a fake subscription_id to start mock compute service builder.put(Management.SUBSCRIPTION_ID, "fake") @@ -53,6 +55,11 @@ public abstract class AbstractAzureComputeServiceTest extends ESIntegTestCase { return builder.build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(CloudAzurePlugin.class, mockPlugin); + } + protected void checkNumberOfNodes(int expected) { NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().execute().actionGet(); assertNotNull(nodeInfos); diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTest.java index fa4131cf7d0..d4ae582c849 100644 --- a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTest.java +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureRepositoryServiceTest.java @@ -33,6 +33,7 @@ import org.junit.After; import org.junit.Before; import java.net.URISyntaxException; +import java.util.Collection; public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTest { @@ -77,7 +78,6 @@ public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTe @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.settingsBuilder() - .extendArray("plugin.types", CloudAzurePlugin.class.getName(), TestPlugin.class.getName()) .put(Storage.API_IMPLEMENTATION, mock) .put(Storage.CONTAINER, "snapshots"); @@ -88,6 +88,11 @@ public abstract class 
AbstractAzureRepositoryServiceTest extends AbstractAzureTe return builder.build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(CloudAzurePlugin.class, TestPlugin.class); + } + @Override public Settings indexSettings() { // During restore we frequently restore index to exactly the same state it was before, that might cause the same diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java index 1263b4ba034..d99823b855c 100644 --- a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java @@ -24,9 +24,12 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; +import java.util.Collection; + /** * Base class for Azure tests that require credentials. *

@@ -40,11 +43,15 @@ public abstract class AbstractAzureTest extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", CloudAzurePlugin.class.getName()) .put(readSettingsFromFile()) .build(); } + @Override + protected Collection> nodePlugins() { + return pluginList(CloudAzurePlugin.class); + } + protected Settings readSettingsFromFile() { Settings.Builder settings = Settings.builder(); settings.put("path.home", createTempDir()); diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java index 2cbdf4eaa3b..d2d559bc3ad 100644 --- a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java @@ -44,7 +44,7 @@ import static org.hamcrest.Matchers.nullValue; public class AzureMinimumMasterNodesTest extends AbstractAzureComputeServiceTest { public AzureMinimumMasterNodesTest() { - super(AzureComputeServiceTwoNodesMock.TestPlugin.class.getName()); + super(AzureComputeServiceTwoNodesMock.TestPlugin.class); } @Override diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java index be69bde31ff..e46fadd7f03 100644 --- a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java @@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.notNullValue; public class AzureSimpleTest extends AbstractAzureComputeServiceTest { public AzureSimpleTest() { - super(AzureComputeServiceSimpleMock.TestPlugin.class.getName()); + 
super(AzureComputeServiceSimpleMock.TestPlugin.class); } @Test diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java index f5ec7427cfb..3b6287c65ce 100644 --- a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java @@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.notNullValue; public class AzureTwoStartedNodesTest extends AbstractAzureComputeServiceTest { public AzureTwoStartedNodesTest() { - super(AzureComputeServiceTwoNodesMock.TestPlugin.class.getName()); + super(AzureComputeServiceTwoNodesMock.TestPlugin.class); } @Test diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java index a335910e9f7..04407ce2d25 100644 --- a/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java @@ -22,18 +22,19 @@ package org.elasticsearch.index.store; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.util.Collection; + import static org.hamcrest.Matchers.is; abstract public class AbstractAzureFsTest extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .extendArray("plugin.types", CloudAzurePlugin.class.getName()).build(); + protected Collection> nodePlugins() { + 
return pluginList(CloudAzurePlugin.class); } @Test diff --git a/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java b/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java index 9d79a230b2f..7b0a88ce752 100644 --- a/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java +++ b/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java @@ -35,10 +35,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.junit.Test; +import java.util.Collection; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -51,12 +53,10 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = SUITE, transportClientRatio = 0) public class DeleteByQueryTests extends ESIntegTestCase { - - protected Settings nodeSettings(int nodeOrdinal) { - Settings.Builder settings = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", DeleteByQueryPlugin.class.getName()); - return settings.build(); + + @Override + protected Collection> nodePlugins() { + return pluginList(DeleteByQueryPlugin.class); } @Test(expected = ActionRequestValidationException.class) diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index aa82c5a20ee..73f7a73547c 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java 
+++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -24,10 +24,12 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.plugin.mapper.MapperSizePlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Locale; import java.util.Map; @@ -41,11 +43,8 @@ import static org.hamcrest.Matchers.notNullValue; public class SizeMappingIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("plugin.types", MapperSizePlugin.class.getName()) - .build(); + protected Collection> nodePlugins() { + return pluginList(MapperSizePlugin.class); } // issue 5053 diff --git a/pom.xml b/pom.xml index 84cb4af6eac..b4fa4af2da0 100644 --- a/pom.xml +++ b/pom.xml @@ -528,7 +528,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4 + 1.4.1 enforce-versions diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index eabc596360c..07c72a2ac98 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -32,7 +32,7 @@ "v": { "type": "boolean", "description": "Verbose mode. 
Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json index 11d0915fce0..9de5a729faa 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json @@ -37,7 +37,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json index 79c347d2548..2661af347d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json @@ -32,7 +32,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json index b070281b92f..a380b558e89 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json @@ -37,7 +37,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false }, "fields": { "type": "list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json index 68bad5bc675..bd0cfcedd53 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json @@ -33,7 +33,7 @@ "v": { "type": "boolean", "description": "Verbose mode. 
Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json index 0a5fe158136..b5c487b9521 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json @@ -42,7 +42,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json index ccce8d6dd2c..d3e35330e1d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json @@ -28,7 +28,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json index 157a33f0408..cdbe75ac936 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json @@ -28,7 +28,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json index 8a801c018dc..1d8bcda3c01 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json @@ -28,7 +28,7 @@ "v": { "type": "boolean", "description": "Verbose mode. 
Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json index c8512a9f16d..88afa9b0c40 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json @@ -28,7 +28,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json index eab71f0ac40..55ee66a2792 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json @@ -26,7 +26,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json index 2fe2049ce40..c235406221a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json @@ -33,7 +33,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json index f032f4d38e6..ebb989b4cf6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json @@ -24,7 +24,7 @@ "v": { "type": "boolean", "description": "Verbose mode. 
Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json index 5aa8e7c8e34..6d941f17d10 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json @@ -32,7 +32,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json index c1431ae0d61..cb8e5e13632 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json @@ -28,7 +28,7 @@ "v": { "type": "boolean", "description": "Verbose mode. Display column headers", - "default": true + "default": false }, "full_id": { "type": "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml index dea14a45bd3..640d77e0183 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml @@ -17,8 +17,7 @@ "Empty cluster": - do: - cat.aliases: - v: false + cat.aliases: {} - match: $body: | @@ -38,8 +37,7 @@ name: test_alias - do: - cat.aliases: - v: false + cat.aliases: {} - match: $body: | @@ -75,8 +73,7 @@ term: foo: bar - do: - cat.aliases: - v: false + cat.aliases: {} - match: $body: | @@ -108,14 +105,12 @@ - do: cat.aliases: name: test_1 - v: false - match: $body: /^test_1 .+ \n$/ - do: cat.aliases: - v: false name: test_2 - match: @@ -123,7 +118,6 @@ - do: cat.aliases: - v: false name: test_* - match: @@ -178,8 +172,7 @@ - 
do: cat.aliases: - v: false - h: [index, alias] + h: [index, alias] - match: $body: /^ test \s+ test_1 \s+ $/ @@ -195,9 +188,3 @@ index \s+ alias \s+ \n test \s+ test_1 \s+ \n $/ - - - - - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.allocation/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.allocation/10_basic.yaml index ea508e15919..04a534e18b6 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.allocation/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.allocation/10_basic.yaml @@ -3,7 +3,6 @@ - do: cat.allocation: help: true - v: false - match: $body: | @@ -21,8 +20,7 @@ "Empty cluster": - do: - cat.allocation: - v: false + cat.allocation: {} - match: $body: | @@ -51,8 +49,7 @@ wait_for_status: yellow - do: - cat.allocation: - v: false + cat.allocation: {} - match: $body: | @@ -82,7 +79,6 @@ - do: cat.allocation: node_id: _master - v: false - match: $body: | @@ -102,7 +98,6 @@ - do: cat.allocation: node_id: non_existent - v: false - match: $body: | @@ -116,7 +111,6 @@ - do: cat.allocation: node_id: "*" - v: false - match: $body: | @@ -215,7 +209,6 @@ - do: cat.allocation: bytes: g - v: false - match: $body: | @@ -231,4 +224,3 @@ \n )+ $/ - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.count/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.count/10_basic.yaml index 76755327140..1a62ab063d9 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.count/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.count/10_basic.yaml @@ -14,8 +14,7 @@ "Test cat count output": - do: - cat.count: - v: false + cat.count: {} - match: $body: | @@ -31,8 +30,7 @@ refresh: true - do: - cat.count: - v: false + cat.count: {} - match: $body: | @@ -50,7 +48,6 @@ - do: cat.count: h: count - v: false - match: $body: | @@ -61,7 +58,6 @@ - do: cat.count: index: index1 - v: false - match: $body: | @@ -77,4 +73,3 @@ 
$body: | /^ epoch \s+ timestamp \s+ count \s+ \n \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/ - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yaml index ada97021e6e..bc362fae58c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.fielddata/10_basic.yaml @@ -17,8 +17,7 @@ "Test cat fielddata output": - do: - cat.fielddata: - v: false + cat.fielddata: {} - do: index: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yaml index ad4fd2f5521..9bfde46a371 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yaml @@ -28,8 +28,7 @@ "Empty cluster": - do: - cat.health: - v: false + cat.health: {} - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml index d8beff155c7..a5a67d1a557 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml @@ -2,8 +2,7 @@ "Test cat indices output": - do: - cat.indices: - v: false + cat.indices: {} - match: $body: | @@ -20,8 +19,7 @@ cluster.health: wait_for_status: yellow - do: - cat.indices: - v: false + cat.indices: {} - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodeattrs/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodeattrs/10_basic.yaml index 5af720effec..f076a3b1859 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodeattrs/10_basic.yaml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodeattrs/10_basic.yaml @@ -6,8 +6,7 @@ reason: "Waiting for #12558" - do: - cat.nodeattrs: - v: false + cat.nodeattrs: {} - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml index 12545f0a64b..66145f47b0a 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml @@ -2,8 +2,7 @@ "Test cat nodes output": - do: - cat.nodes: - v: false + cat.nodes: {} - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml index 8ac01eaf317..4bc3e996fca 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml @@ -2,8 +2,7 @@ "Test cat recovery output": - do: - cat.recovery: - v: false + cat.recovery: {} - match: $body: | @@ -20,8 +19,7 @@ cluster.health: wait_for_status: yellow - do: - cat.recovery: - v: false + cat.recovery: {} - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yaml index b64d14ebd50..c31eb1b79c5 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yaml @@ -26,8 +26,7 @@ "Test cat segments output": - do: - cat.segments: - v: false + cat.segments: {} - match: $body: | @@ -50,8 +49,7 @@ cluster.health: wait_for_status: green - do: - cat.segments: - v: false + cat.segments: {} - match: $body: | /^(index1 \s+ \d \s+ (p|r) \s+ \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} \s+ _\d 
(\s\d){3} \s+ @@ -81,8 +79,7 @@ - do: - cat.segments: - v: false + cat.segments: {} - match: $body: | /^(index(1|2) .+ \n?){2}$/ @@ -90,7 +87,6 @@ - do: cat.segments: index: index2 - v: false - match: $body: | /^(index2 .+ \n?)$/ @@ -118,4 +114,3 @@ catch: forbidden cat.segments: index: index1 - v: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index 2838dcb1a55..40f8740db96 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -77,8 +77,7 @@ "Test cat shards output": - do: - cat.shards: - v: false + cat.shards: {} - match: $body: | @@ -95,8 +94,7 @@ cluster.health: wait_for_status: yellow - do: - cat.shards: - v: false + cat.shards: {} - match: $body: | @@ -115,8 +113,7 @@ wait_for_relocating_shards: 0 - do: - cat.shards: - v: false + cat.shards: {} - match: $body: | /^(index(1|2) \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING|RELOCATING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){15}$/ @@ -124,7 +121,6 @@ - do: cat.shards: index: index2 - v: false - match: $body: | /^(index2 \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING|RELOCATING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){5}$/ @@ -146,8 +142,6 @@ - do: cat.shards: index: index3 - v: false - match: $body: | /^(index3 \s+ \d \s+ (p|s) \s+ ((STARTED|INITIALIZING|RELOCATING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? 
\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){2}$/ - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml index edb87ce27b9..0c8ac5b4028 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml @@ -2,8 +2,7 @@ "Test cat thread_pool output": - do: - cat.thread_pool: - v: false + cat.thread_pool: {} - match: $body: | @@ -22,7 +21,6 @@ - do: cat.thread_pool: h: pid,id,h,i,po - v: false - match: $body: |