Merge remote-tracking branch 'dakrone/include-cluster-info-in-explain-api'

This commit is contained in:
Lee Hinman 2016-07-12 16:26:46 -06:00
commit 95cf2407ee
11 changed files with 238 additions and 15 deletions

View File

@ -52,6 +52,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
private Integer shard; private Integer shard;
private Boolean primary; private Boolean primary;
private boolean includeYesDecisions = false; private boolean includeYesDecisions = false;
private boolean includeDiskInfo = false;
/** Explain the first unassigned shard */ /** Explain the first unassigned shard */
public ClusterAllocationExplainRequest() { public ClusterAllocationExplainRequest() {
@ -134,6 +135,16 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
return this.includeYesDecisions; return this.includeYesDecisions;
} }
/**
 * Sets whether the response should include the cluster's gathered disk
 * information (disk usage per node and shard sizes).
 */
public void includeDiskInfo(boolean includeDiskInfo) {
this.includeDiskInfo = includeDiskInfo;
}

/** Returns {@code true} if disk usage and shard size information should also be returned */
public boolean includeDiskInfo() {
return this.includeDiskInfo;
}
@Override @Override
public String toString() { public String toString() {
StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest["); StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");

View File

@ -53,6 +53,18 @@ public class ClusterAllocationExplainRequestBuilder
return this; return this;
} }
/** Whether to include "YES" decider decisions in the response instead of only "NO" decisions */
public ClusterAllocationExplainRequestBuilder setIncludeYesDecisions(boolean includeYesDecisions) {
request.includeYesDecisions(includeYesDecisions);
return this;
}

/** Whether to include the cluster's gathered disk usage and shard size information in the response */
public ClusterAllocationExplainRequestBuilder setIncludeDiskInfo(boolean includeDiskInfo) {
request.includeDiskInfo(includeDiskInfo);
return this;
}
/** /**
* Signal that the first unassigned shard should be used * Signal that the first unassigned shard should be used
*/ */

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.allocation; package org.elasticsearch.action.admin.cluster.allocation;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
@ -48,10 +49,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
private final long allocationDelayMillis; private final long allocationDelayMillis;
private final long remainingDelayMillis; private final long remainingDelayMillis;
private final Map<DiscoveryNode, NodeExplanation> nodeExplanations; private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
private final ClusterInfo clusterInfo;
public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis, public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch, long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
Map<DiscoveryNode, NodeExplanation> nodeExplanations) { Map<DiscoveryNode, NodeExplanation> nodeExplanations, @Nullable ClusterInfo clusterInfo) {
this.shard = shard; this.shard = shard;
this.primary = primary; this.primary = primary;
this.hasPendingAsyncFetch = hasPendingAsyncFetch; this.hasPendingAsyncFetch = hasPendingAsyncFetch;
@ -60,6 +62,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
this.allocationDelayMillis = allocationDelayMillis; this.allocationDelayMillis = allocationDelayMillis;
this.remainingDelayMillis = remainingDelayMillis; this.remainingDelayMillis = remainingDelayMillis;
this.nodeExplanations = nodeExplanations; this.nodeExplanations = nodeExplanations;
this.clusterInfo = clusterInfo;
} }
public ClusterAllocationExplanation(StreamInput in) throws IOException { public ClusterAllocationExplanation(StreamInput in) throws IOException {
@ -78,6 +81,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation); nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
} }
this.nodeExplanations = nodeToExplanation; this.nodeExplanations = nodeToExplanation;
if (in.readBoolean()) {
this.clusterInfo = new ClusterInfo(in);
} else {
this.clusterInfo = null;
}
} }
@Override @Override
@ -94,6 +102,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
for (NodeExplanation explanation : this.nodeExplanations.values()) { for (NodeExplanation explanation : this.nodeExplanations.values()) {
explanation.writeTo(out); explanation.writeTo(out);
} }
if (this.clusterInfo != null) {
out.writeBoolean(true);
this.clusterInfo.writeTo(out);
} else {
out.writeBoolean(false);
}
} }
/** Return the shard that the explanation is about */ /** Return the shard that the explanation is about */
@ -143,6 +157,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
return this.nodeExplanations; return this.nodeExplanations;
} }
/** Returns the cluster disk info ({@link ClusterInfo}), or {@code null} if none is available */
@Nullable
public ClusterInfo getClusterInfo() {
return this.clusterInfo;
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); { builder.startObject(); {
builder.startObject("shard"); { builder.startObject("shard"); {
@ -164,11 +184,18 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis)); builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis)); builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
} }
builder.startObject("nodes"); builder.startObject("nodes"); {
for (NodeExplanation explanation : nodeExplanations.values()) { for (NodeExplanation explanation : nodeExplanations.values()) {
explanation.toXContent(builder, params); explanation.toXContent(builder, params);
} }
}
builder.endObject(); // end nodes builder.endObject(); // end nodes
if (this.clusterInfo != null) {
builder.startObject("cluster_info"); {
this.clusterInfo.toXContent(builder, params);
}
builder.endObject(); // end "cluster_info"
}
} }
builder.endObject(); // end wrapping object builder.endObject(); // end wrapping object
return builder; return builder;

View File

@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockException;
@ -219,7 +220,7 @@ public class TransportClusterAllocationExplainAction
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes, public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
boolean includeYesDecisions, ShardsAllocator shardAllocator, boolean includeYesDecisions, ShardsAllocator shardAllocator,
List<IndicesShardStoresResponse.StoreStatus> shardStores, List<IndicesShardStoresResponse.StoreStatus> shardStores,
GatewayAllocator gatewayAllocator) { GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) {
// don't short circuit deciders, we want a full explanation // don't short circuit deciders, we want a full explanation
allocation.debugDecision(true); allocation.debugDecision(true);
// get the existing unassigned info if available // get the existing unassigned info if available
@ -263,15 +264,16 @@ public class TransportClusterAllocationExplainAction
} }
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui, shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations); gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo);
} }
@Override @Override
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state, protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
final ActionListener<ClusterAllocationExplainResponse> listener) { final ActionListener<ClusterAllocationExplainResponse> listener) {
final RoutingNodes routingNodes = state.getRoutingNodes(); final RoutingNodes routingNodes = state.getRoutingNodes();
final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo();
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state, final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
clusterInfoService.getClusterInfo(), System.nanoTime(), false); clusterInfo, System.nanoTime(), false);
ShardRouting foundShard = null; ShardRouting foundShard = null;
if (request.useAnyUnassignedShard()) { if (request.useAnyUnassignedShard()) {
@ -318,7 +320,8 @@ public class TransportClusterAllocationExplainAction
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName()); shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id()); List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes, ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator); request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator,
request.includeDiskInfo() ? clusterInfo : null);
listener.onResponse(new ClusterAllocationExplainResponse(cae)); listener.onResponse(new ClusterAllocationExplainResponse(cae));
} }

View File

@ -19,16 +19,26 @@
package org.elasticsearch.cluster; package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/** /**
* ClusterInfo is an object representing a map of nodes to {@link DiskUsage} * ClusterInfo is an object representing a map of nodes to {@link DiskUsage}
* and a map of shard ids to shard sizes, see * and a map of shard ids to shard sizes, see
* <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code> * <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
* for the key used in the shardSizes map * for the key used in the shardSizes map
*/ */
public class ClusterInfo { public class ClusterInfo implements ToXContent, Writeable {
private final ImmutableOpenMap<String, DiskUsage> leastAvailableSpaceUsage; private final ImmutableOpenMap<String, DiskUsage> leastAvailableSpaceUsage;
private final ImmutableOpenMap<String, DiskUsage> mostAvailableSpaceUsage; private final ImmutableOpenMap<String, DiskUsage> mostAvailableSpaceUsage;
final ImmutableOpenMap<String, Long> shardSizes; final ImmutableOpenMap<String, Long> shardSizes;
@ -57,6 +67,105 @@ public class ClusterInfo {
this.routingToDataPath = routingToDataPath; this.routingToDataPath = routingToDataPath;
} }
/**
 * Deserializes a {@code ClusterInfo} previously written by {@link #writeTo(StreamOutput)}:
 * least-available disk usages, most-available disk usages, shard sizes, and
 * shard-routing-to-data-path mappings, each prefixed by its entry count.
 *
 * @param in stream positioned at a serialized ClusterInfo
 * @throws IOException if reading from the stream fails
 */
public ClusterInfo(StreamInput in) throws IOException {
    // writeTo() serializes every map size with writeVInt(), so the counts must be
    // read back with readVInt(); readInt() would consume a fixed 4 bytes, mis-decode
    // the count, and corrupt everything that follows in the stream.
    int size = in.readVInt();
    Map<String, DiskUsage> leastMap = new HashMap<>(size);
    for (int i = 0; i < size; i++) {
        leastMap.put(in.readString(), new DiskUsage(in));
    }
    size = in.readVInt();
    Map<String, DiskUsage> mostMap = new HashMap<>(size);
    for (int i = 0; i < size; i++) {
        mostMap.put(in.readString(), new DiskUsage(in));
    }
    size = in.readVInt();
    Map<String, Long> sizeMap = new HashMap<>(size);
    for (int i = 0; i < size; i++) {
        sizeMap.put(in.readString(), in.readLong());
    }
    size = in.readVInt();
    Map<ShardRouting, String> routingMap = new HashMap<>(size);
    for (int i = 0; i < size; i++) {
        routingMap.put(new ShardRouting(in), in.readString());
    }
    // freeze the mutable staging maps into the immutable fields
    ImmutableOpenMap.Builder<String, DiskUsage> leastBuilder = ImmutableOpenMap.builder();
    this.leastAvailableSpaceUsage = leastBuilder.putAll(leastMap).build();
    ImmutableOpenMap.Builder<String, DiskUsage> mostBuilder = ImmutableOpenMap.builder();
    this.mostAvailableSpaceUsage = mostBuilder.putAll(mostMap).build();
    ImmutableOpenMap.Builder<String, Long> sizeBuilder = ImmutableOpenMap.builder();
    this.shardSizes = sizeBuilder.putAll(sizeMap).build();
    ImmutableOpenMap.Builder<ShardRouting, String> routingBuilder = ImmutableOpenMap.builder();
    this.routingToDataPath = routingBuilder.putAll(routingMap).build();
}
/**
 * Serializes this {@code ClusterInfo}. Each of the four maps is written as a
 * vint entry count followed by its key/value pairs; the deserializing side
 * must read the counts with {@code readVInt()} to match.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(this.leastAvailableSpaceUsage.size());
for (ObjectObjectCursor<String, DiskUsage> c : this.leastAvailableSpaceUsage) {
out.writeString(c.key);
c.value.writeTo(out);
}
out.writeVInt(this.mostAvailableSpaceUsage.size());
for (ObjectObjectCursor<String, DiskUsage> c : this.mostAvailableSpaceUsage) {
out.writeString(c.key);
c.value.writeTo(out);
}
out.writeVInt(this.shardSizes.size());
for (ObjectObjectCursor<String, Long> c : this.shardSizes) {
out.writeString(c.key);
// a null (unknown) shard size is encoded with the sentinel -1
if (c.value == null) {
out.writeLong(-1);
} else {
out.writeLong(c.value);
}
}
out.writeVInt(this.routingToDataPath.size());
for (ObjectObjectCursor<ShardRouting, String> c : this.routingToDataPath) {
c.key.writeTo(out);
out.writeString(c.value);
}
}
/**
 * Renders per-node disk usages ("nodes"), shard sizes ("shard_sizes"), and
 * shard data paths ("shard_paths"). Does not open or close an enclosing
 * object; callers are expected to wrap the output.
 */
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject("nodes"); {
        for (ObjectObjectCursor<String, DiskUsage> c : this.leastAvailableSpaceUsage) {
            builder.startObject(c.key); { // node
                builder.field("node_name", c.value.getNodeName());
                builder.startObject("least_available"); {
                    c.value.toShortXContent(builder, params);
                }
                builder.endObject(); // end "least_available"
                builder.startObject("most_available"); {
                    // the most-available map may lack an entry for this node
                    DiskUsage most = this.mostAvailableSpaceUsage.get(c.key);
                    if (most != null) {
                        most.toShortXContent(builder, params);
                    }
                }
                builder.endObject(); // end "most_available"
            }
            builder.endObject(); // end $nodename
        }
    }
    builder.endObject(); // end "nodes"
    builder.startObject("shard_sizes"); {
        for (ObjectObjectCursor<String, Long> c : this.shardSizes) {
            // writeTo() encodes a null (unknown) size as -1; mirror that here
            // instead of NPE-ing when byteSizeField() unboxes a null Long
            long sizeInBytes = c.value == null ? -1L : c.value;
            builder.byteSizeField(c.key + "_bytes", c.key, sizeInBytes);
        }
    }
    builder.endObject(); // end "shard_sizes"
    builder.startObject("shard_paths"); {
        for (ObjectObjectCursor<ShardRouting, String> c : this.routingToDataPath) {
            builder.field(c.key.toString(), c.value);
        }
    }
    builder.endObject(); // end "shard_paths"
    return builder;
}
/** /**
* Returns a node id to disk usage mapping for the path that has the least available space on the node. * Returns a node id to disk usage mapping for the path that has the least available space on the node.
*/ */

View File

@ -20,12 +20,19 @@
package org.elasticsearch.cluster; package org.elasticsearch.cluster;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/** /**
* Encapsulation class used to represent the amount of disk used on a node. * Encapsulation class used to represent the amount of disk used on a node.
*/ */
public class DiskUsage { public class DiskUsage implements ToXContent, Writeable {
final String nodeId; final String nodeId;
final String nodeName; final String nodeName;
final String path; final String path;
@ -44,6 +51,44 @@ public class DiskUsage {
this.path = path; this.path = path;
} }
/**
 * Reads a {@code DiskUsage} in the order written by {@link #writeTo(StreamOutput)}:
 * node id, node name, path, then total and free bytes as vlongs.
 */
public DiskUsage(StreamInput in) throws IOException {
this.nodeId = in.readString();
this.nodeName = in.readString();
this.path = in.readString();
this.totalBytes = in.readVLong();
this.freeBytes = in.readVLong();
}
/**
 * Serializes this {@code DiskUsage}; field order must match the
 * {@code DiskUsage(StreamInput)} constructor.
 * NOTE(review): vlong encoding presumes totalBytes/freeBytes are non-negative —
 * verify callers never construct a DiskUsage with negative byte counts.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(this.nodeId);
out.writeString(this.nodeName);
out.writeString(this.path);
out.writeVLong(this.totalBytes);
out.writeVLong(this.freeBytes);
}
/** Rounds a percentage to a single decimal place (e.g. 33.333 becomes 33.3). */
private static double truncatePercent(double pct) {
    final double tenths = Math.round(pct * 10.0);
    return tenths / 10.0;
}
/**
 * Emits the path plus total/used/free byte counts and disk percentages,
 * without the node id/name fields (see {@code toXContent} for the full form).
 */
public XContentBuilder toShortXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("path", this.path);
builder.byteSizeField("total_bytes", "total", this.totalBytes);
builder.byteSizeField("used_bytes", "used", this.getUsedBytes());
builder.byteSizeField("free_bytes", "free", this.freeBytes);
// percentages are rounded to one decimal place for readability
builder.field("free_disk_percent", truncatePercent(this.getFreeDiskAsPercentage()));
builder.field("used_disk_percent", truncatePercent(this.getUsedDiskAsPercentage()));
return builder;
}
/** Emits node_id and node_name followed by the short-form fields from {@code toShortXContent}. */
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("node_id", this.nodeId);
builder.field("node_name", this.nodeName);
builder = toShortXContent(builder, params);
return builder;
}
public String getNodeId() { public String getNodeId() {
return nodeId; return nodeId;
} }

View File

@ -75,6 +75,7 @@ public class RestClusterAllocationExplainAction extends BaseRestHandler {
try { try {
req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false)); req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false));
req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false));
client.admin().cluster().allocationExplain(req, new RestBuilderListener<ClusterAllocationExplainResponse>(channel) { client.admin().cluster().allocationExplain(req, new RestBuilderListener<ClusterAllocationExplainResponse>(channel) {
@Override @Override
public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws Exception { public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws Exception {

View File

@ -201,7 +201,7 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false); yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
nodeExplanations.put(ne.getNode(), ne); nodeExplanations.put(ne.getNode(), ne);
ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shard, true, ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shard, true,
"assignedNode", allocationDelay, remainingDelay, null, false, nodeExplanations); "assignedNode", allocationDelay, remainingDelay, null, false, nodeExplanations, null);
BytesStreamOutput out = new BytesStreamOutput(); BytesStreamOutput out = new BytesStreamOutput();
cae.writeTo(out); cae.writeTo(out);
StreamInput in = out.bytes().streamInput(); StreamInput in = out.bytes().streamInput();
@ -237,7 +237,7 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1); Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1);
nodeExplanations.put(ne.getNode(), ne); nodeExplanations.put(ne.getNode(), ne);
ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shardId, true, ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shardId, true,
"assignedNode", 42, 42, null, false, nodeExplanations); "assignedNode", 42, 42, null, false, nodeExplanations, null);
XContentBuilder builder = XContentFactory.jsonBuilder(); XContentBuilder builder = XContentFactory.jsonBuilder();
cae.toXContent(builder, ToXContent.EMPTY_PARAMS); cae.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\"shard\":{\"index\":\"foo\",\"index_uuid\":\"uuid\",\"id\":0,\"primary\":true},\"assigned\":true," + assertEquals("{\"shard\":{\"index\":\"foo\",\"index_uuid\":\"uuid\",\"id\":0,\"primary\":true},\"assigned\":true," +

View File

@ -183,10 +183,19 @@ shard it finds by sending an empty body, such as:
$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' $ curl -XGET 'http://localhost:9200/_cluster/allocation/explain'
-------------------------------------------------- --------------------------------------------------
And if you would like to include all decisions that were factored into the final If you would like to include all decisions that were factored into the final
decision, the `include_yes_decisions` parameter will return all decisions: decision, the `include_yes_decisions` parameter will return all decisions:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain?include_yes_decisions=true' $ curl -XGET 'http://localhost:9200/_cluster/allocation/explain?include_yes_decisions=true'
-------------------------------------------------- --------------------------------------------------
Additionally, you can return information gathered by the cluster info service
about disk usage and shard sizes by setting the `include_disk_info` parameter to
`true`:
[source,js]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain?include_disk_info=true'
--------------------------------------------------

View File

@ -10,6 +10,10 @@
"include_yes_decisions": { "include_yes_decisions": {
"type": "boolean", "type": "boolean",
"description": "Return 'YES' decisions in explanation (default: false)" "description": "Return 'YES' decisions in explanation (default: false)"
},
"include_disk_info": {
"type": "boolean",
"description": "Return information about disk usage and shard sizes (default: false)"
} }
} }
}, },

View File

@ -60,7 +60,8 @@
- set: {master_node: node_id} - set: {master_node: node_id}
- do: - do:
cluster.allocation_explain: {} cluster.allocation_explain:
include_disk_info: true
- match: { assigned: false } - match: { assigned: false }
- match: { unassigned_info.reason: "INDEX_CREATED" } - match: { unassigned_info.reason: "INDEX_CREATED" }
@ -68,6 +69,7 @@
- match: { shard.index: "test" } - match: { shard.index: "test" }
- match: { shard.id: 0 } - match: { shard.id: 0 }
- match: { shard.primary: false } - match: { shard.primary: false }
- is_true: cluster_info
# - is_true: nodes.$node_id.node_name # - is_true: nodes.$node_id.node_name
# - match: { nodes.$node_id.node_attributes.testattr: "test" } # - match: { nodes.$node_id.node_attributes.testattr: "test" }
# - match: { nodes.$node_id.node_attributes.portsfile: "true" } # - match: { nodes.$node_id.node_attributes.portsfile: "true" }