Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-02-17 10:25:15 +00:00
Cluster Explain API uses the allocation process to explain shard allocation decisions (#22182)
This PR completes the refactoring of the cluster allocation explain API and improves it in the following two high-level ways:

1. The explain API now uses the same allocators that the AllocationService uses to make shard allocation decisions. Prior to this PR, the explain API would run the deciders against each node for the shard in question, but this was not executed on the same code path as the allocators, so many shard allocation scenarios were not captured.

2. The APIs have changed, both on the Java and JSON level, to accurately capture the decisions made by the system. The APIs also now report on shard moving and rebalancing decisions, whereas the previous API did not report decisions for moving shards that cannot remain on their current node or for rebalancing shards to form a more balanced cluster.

Note: this change affects plugin developers who may have a custom implementation of the ShardsAllocator interface. The method weighShard has been removed and no longer has any utility. In order to support the new explain API, a custom implementation of ShardsAllocator must now implement ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation), which provides a decision and explanation for allocating a single shard. Implementations that do not support explaining a single shard allocation via the cluster allocation explain API can simply throw an UnsupportedOperationException from this method.
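For plugin authors, a minimal sketch of the new contract follows; the class name is hypothetical and the rest of the ShardsAllocator interface (the actual allocation logic) is intentionally left to the concrete implementation, but the decideShardAllocation signature is the one this change requires:

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;

// Hypothetical custom allocator; only the explain-related method is sketched here,
// the remaining ShardsAllocator methods are left to the concrete plugin implementation.
public abstract class CustomShardsAllocator implements ShardsAllocator {

    @Override
    public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
        // Allocators that cannot explain a single shard allocation via the
        // cluster allocation explain API may simply signal that:
        throw new UnsupportedOperationException("explaining a single shard allocation is not supported");
    }
}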
This commit is contained in:
parent a3918ad094
commit 20ab4be59f
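On the Java level, the reworked request exposes the setters that appear in the diff below. A minimal sketch of building a request that asks for an explanation of a specific replica copy; "my-index", 0, and "node-1" are placeholder values, not names taken from this change:

import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;

public class ExplainRequestExample {
    // Builds an explain request for one replica shard copy; values are illustrative only.
    static ClusterAllocationExplainRequest buildRequest() {
        ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest()
                .setIndex("my-index")        // index of the shard to explain
                .setShard(0)                 // shard id within that index
                .setPrimary(false)           // false = explain a replica copy
                .setCurrentNode("node-1");   // explain the copy currently allocated to this node
        request.includeYesDecisions(true);   // also report "yes" decisions, not only "no"/"throttle"
        request.includeDiskInfo(true);       // attach the gathered disk usage information
        return request;
    }
}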
@@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.master.MasterNodeRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
@@ -46,30 +47,42 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
||||
PARSER.declareString(ClusterAllocationExplainRequest::setIndex, new ParseField("index"));
|
||||
PARSER.declareInt(ClusterAllocationExplainRequest::setShard, new ParseField("shard"));
|
||||
PARSER.declareBoolean(ClusterAllocationExplainRequest::setPrimary, new ParseField("primary"));
|
||||
PARSER.declareString(ClusterAllocationExplainRequest::setCurrentNode, new ParseField("current_node"));
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private String index;
|
||||
@Nullable
|
||||
private Integer shard;
|
||||
@Nullable
|
||||
private Boolean primary;
|
||||
@Nullable
|
||||
private String currentNode;
|
||||
private boolean includeYesDecisions = false;
|
||||
private boolean includeDiskInfo = false;
|
||||
|
||||
/** Explain the first unassigned shard */
|
||||
/**
|
||||
* Create a new allocation explain request to explain any unassigned shard in the cluster.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest() {
|
||||
this.index = null;
|
||||
this.shard = null;
|
||||
this.primary = null;
|
||||
this.currentNode = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new allocation explain request. If {@code primary} is false, the first unassigned replica
|
||||
* will be picked for explanation. If no replicas are unassigned, the first assigned replica will
|
||||
* be explained.
|
||||
*
|
||||
* Package private for testing.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest(String index, int shard, boolean primary) {
|
||||
ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) {
|
||||
this.index = index;
|
||||
this.shard = shard;
|
||||
this.primary = primary;
|
||||
this.currentNode = currentNode;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -93,54 +106,103 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
||||
* Returns {@code true} iff the first unassigned shard is to be used
|
||||
*/
|
||||
public boolean useAnyUnassignedShard() {
|
||||
return this.index == null && this.shard == null && this.primary == null;
|
||||
return this.index == null && this.shard == null && this.primary == null && this.currentNode == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the index name of the shard to explain.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setIndex(String index) {
|
||||
this.index = index;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the index name of the shard to explain, or {@code null} to use any unassigned shard (see {@link #useAnyUnassignedShard()}).
|
||||
*/
|
||||
@Nullable
|
||||
public String getIndex() {
|
||||
return this.index;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the shard id of the shard to explain.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setShard(Integer shard) {
|
||||
this.shard = shard;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the shard id of the shard to explain, or {@code null} to use any unassigned shard (see {@link #useAnyUnassignedShard()}).
|
||||
*/
|
||||
@Nullable
|
||||
public Integer getShard() {
|
||||
return this.shard;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets whether to explain the allocation of the primary shard or a replica shard copy
|
||||
* for the shard id (see {@link #getShard()}).
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setPrimary(Boolean primary) {
|
||||
this.primary = primary;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns {@code true} if explaining the primary shard for the shard id (see {@link #getShard()}),
|
||||
* {@code false} if explaining a replica shard copy for the shard id, or {@code null} to use any
|
||||
* unassigned shard (see {@link #useAnyUnassignedShard()}).
|
||||
*/
|
||||
@Nullable
|
||||
public Boolean isPrimary() {
|
||||
return this.primary;
|
||||
}
|
||||
|
||||
/**
|
||||
* Requests the explain API to explain an already assigned replica shard currently allocated to
|
||||
* the given node.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setCurrentNode(String currentNodeId) {
|
||||
this.currentNode = currentNodeId;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the node holding the replica shard to be explained. Returns {@code null} if any replica shard
|
||||
* can be explained.
|
||||
*/
|
||||
@Nullable
|
||||
public String getCurrentNode() {
|
||||
return currentNode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set to {@code true} to include yes decisions for a particular node.
|
||||
*/
|
||||
public void includeYesDecisions(boolean includeYesDecisions) {
|
||||
this.includeYesDecisions = includeYesDecisions;
|
||||
}
|
||||
|
||||
/** Returns true if all decisions should be included. Otherwise only "NO" and "THROTTLE" decisions are returned */
|
||||
/**
|
||||
* Returns {@code true} if yes decisions should be included. Otherwise only "no" and "throttle"
|
||||
* decisions are returned.
|
||||
*/
|
||||
public boolean includeYesDecisions() {
|
||||
return this.includeYesDecisions;
|
||||
}
|
||||
|
||||
/** {@code true} to include information about the gathered disk information of nodes in the cluster */
|
||||
/**
|
||||
* Set to {@code true} to include information about the gathered disk information of nodes in the cluster.
|
||||
*/
|
||||
public void includeDiskInfo(boolean includeDiskInfo) {
|
||||
this.includeDiskInfo = includeDiskInfo;
|
||||
}
|
||||
|
||||
/** Returns true if information about disk usage and shard sizes should also be returned */
|
||||
/**
|
||||
* Returns {@code true} if information about disk usage and shard sizes should also be returned.
|
||||
*/
|
||||
public boolean includeDiskInfo() {
|
||||
return this.includeDiskInfo;
|
||||
}
|
||||
@@ -154,6 +216,9 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
||||
sb.append("index=").append(index);
|
||||
sb.append(",shard=").append(shard);
|
||||
sb.append(",primary?=").append(primary);
|
||||
if (currentNode != null) {
|
||||
sb.append(",currentNode=").append(currentNode);
|
||||
}
|
||||
}
|
||||
sb.append(",includeYesDecisions?=").append(includeYesDecisions);
|
||||
return sb.toString();
|
||||
@@ -170,21 +235,32 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
checkVersion(in.getVersion());
|
||||
super.readFrom(in);
|
||||
this.index = in.readOptionalString();
|
||||
this.shard = in.readOptionalVInt();
|
||||
this.primary = in.readOptionalBoolean();
|
||||
this.currentNode = in.readOptionalString();
|
||||
this.includeYesDecisions = in.readBoolean();
|
||||
this.includeDiskInfo = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
checkVersion(out.getVersion());
|
||||
super.writeTo(out);
|
||||
out.writeOptionalString(index);
|
||||
out.writeOptionalVInt(shard);
|
||||
out.writeOptionalBoolean(primary);
|
||||
out.writeOptionalString(currentNode);
|
||||
out.writeBoolean(includeYesDecisions);
|
||||
out.writeBoolean(includeDiskInfo);
|
||||
}
|
||||
|
||||
private void checkVersion(Version version) {
|
||||
if (version.before(Version.V_5_2_0_UNRELEASED)) {
|
||||
throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0_UNRELEASED +
|
||||
" nodes, node version [" + version + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -19,7 +19,6 @@
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
@@ -65,6 +64,15 @@ public class ClusterAllocationExplainRequestBuilder
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Requests the explain API to explain an already assigned replica shard currently allocated to
|
||||
* the given node.
|
||||
*/
|
||||
public ClusterAllocationExplainRequestBuilder setCurrentNode(String currentNode) {
|
||||
request.setCurrentNode(currentNode);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Signal that the first unassigned shard should be used
|
||||
*/
|
||||
|
@@ -20,7 +20,6 @@
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
|
@@ -21,285 +21,184 @@ package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.cluster.ClusterInfo;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.allocation.AbstractAllocationDecision.discoveryNodeToXContent;
|
||||
|
||||
/**
|
||||
* A {@code ClusterAllocationExplanation} is an explanation of why a shard may or may not be allocated to nodes. It also includes weights
|
||||
* for where the shard is likely to be assigned. It is an immutable class
|
||||
* A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned,
|
||||
* or if it is not unassigned, then which nodes it could possibly be relocated to.
|
||||
* It is an immutable class.
|
||||
*/
|
||||
public final class ClusterAllocationExplanation implements ToXContent, Writeable {
|
||||
|
||||
private final ShardId shard;
|
||||
private final boolean primary;
|
||||
private final boolean hasPendingAsyncFetch;
|
||||
private final String assignedNodeId;
|
||||
private final UnassignedInfo unassignedInfo;
|
||||
private final long allocationDelayMillis;
|
||||
private final long remainingDelayMillis;
|
||||
private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
|
||||
private final ShardRouting shardRouting;
|
||||
private final DiscoveryNode currentNode;
|
||||
private final DiscoveryNode relocationTargetNode;
|
||||
private final ClusterInfo clusterInfo;
|
||||
private final ShardAllocationDecision shardAllocationDecision;
|
||||
|
||||
public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
|
||||
long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
|
||||
Map<DiscoveryNode, NodeExplanation> nodeExplanations, @Nullable ClusterInfo clusterInfo) {
|
||||
this.shard = shard;
|
||||
this.primary = primary;
|
||||
this.hasPendingAsyncFetch = hasPendingAsyncFetch;
|
||||
this.assignedNodeId = assignedNodeId;
|
||||
this.unassignedInfo = unassignedInfo;
|
||||
this.allocationDelayMillis = allocationDelayMillis;
|
||||
this.remainingDelayMillis = remainingDelayMillis;
|
||||
this.nodeExplanations = nodeExplanations;
|
||||
public ClusterAllocationExplanation(ShardRouting shardRouting, @Nullable DiscoveryNode currentNode,
|
||||
@Nullable DiscoveryNode relocationTargetNode, @Nullable ClusterInfo clusterInfo,
|
||||
ShardAllocationDecision shardAllocationDecision) {
|
||||
this.shardRouting = shardRouting;
|
||||
this.currentNode = currentNode;
|
||||
this.relocationTargetNode = relocationTargetNode;
|
||||
this.clusterInfo = clusterInfo;
|
||||
this.shardAllocationDecision = shardAllocationDecision;
|
||||
}
|
||||
|
||||
public ClusterAllocationExplanation(StreamInput in) throws IOException {
|
||||
this.shard = ShardId.readShardId(in);
|
||||
this.primary = in.readBoolean();
|
||||
this.hasPendingAsyncFetch = in.readBoolean();
|
||||
this.assignedNodeId = in.readOptionalString();
|
||||
this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
|
||||
this.allocationDelayMillis = in.readVLong();
|
||||
this.remainingDelayMillis = in.readVLong();
|
||||
|
||||
int mapSize = in.readVInt();
|
||||
Map<DiscoveryNode, NodeExplanation> nodeToExplanation = new HashMap<>(mapSize);
|
||||
for (int i = 0; i < mapSize; i++) {
|
||||
NodeExplanation nodeExplanation = new NodeExplanation(in);
|
||||
nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
|
||||
}
|
||||
this.nodeExplanations = nodeToExplanation;
|
||||
if (in.readBoolean()) {
|
||||
this.clusterInfo = new ClusterInfo(in);
|
||||
} else {
|
||||
this.clusterInfo = null;
|
||||
}
|
||||
this.shardRouting = new ShardRouting(in);
|
||||
this.currentNode = in.readOptionalWriteable(DiscoveryNode::new);
|
||||
this.relocationTargetNode = in.readOptionalWriteable(DiscoveryNode::new);
|
||||
this.clusterInfo = in.readOptionalWriteable(ClusterInfo::new);
|
||||
this.shardAllocationDecision = new ShardAllocationDecision(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
this.getShard().writeTo(out);
|
||||
out.writeBoolean(this.isPrimary());
|
||||
out.writeBoolean(this.isStillFetchingShardData());
|
||||
out.writeOptionalString(this.getAssignedNodeId());
|
||||
out.writeOptionalWriteable(this.getUnassignedInfo());
|
||||
out.writeVLong(allocationDelayMillis);
|
||||
out.writeVLong(remainingDelayMillis);
|
||||
|
||||
out.writeVInt(this.nodeExplanations.size());
|
||||
for (NodeExplanation explanation : this.nodeExplanations.values()) {
|
||||
explanation.writeTo(out);
|
||||
}
|
||||
if (this.clusterInfo != null) {
|
||||
out.writeBoolean(true);
|
||||
this.clusterInfo.writeTo(out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
shardRouting.writeTo(out);
|
||||
out.writeOptionalWriteable(currentNode);
|
||||
out.writeOptionalWriteable(relocationTargetNode);
|
||||
out.writeOptionalWriteable(clusterInfo);
|
||||
shardAllocationDecision.writeTo(out);
|
||||
}
|
||||
|
||||
/** Return the shard that the explanation is about */
|
||||
/**
|
||||
* Returns the shard that the explanation is about.
|
||||
*/
|
||||
public ShardId getShard() {
|
||||
return this.shard;
|
||||
return shardRouting.shardId();
|
||||
}
|
||||
|
||||
/** Return true if the explained shard is primary, false otherwise */
|
||||
/**
|
||||
* Returns {@code true} if the explained shard is primary, {@code false} otherwise.
|
||||
*/
|
||||
public boolean isPrimary() {
|
||||
return this.primary;
|
||||
return shardRouting.primary();
|
||||
}
|
||||
|
||||
/** Return turn if shard data is still being fetched for the allocation */
|
||||
public boolean isStillFetchingShardData() {
|
||||
return this.hasPendingAsyncFetch;
|
||||
/**
|
||||
* Returns the current {@link ShardRoutingState} of the shard.
|
||||
*/
|
||||
public ShardRoutingState getShardState() {
|
||||
return shardRouting.state();
|
||||
}
|
||||
|
||||
/** Return turn if the shard is assigned to a node */
|
||||
public boolean isAssigned() {
|
||||
return this.assignedNodeId != null;
|
||||
}
|
||||
|
||||
/** Return the assigned node id or null if not assigned */
|
||||
/**
|
||||
* Returns the currently assigned node, or {@code null} if the shard is unassigned.
|
||||
*/
|
||||
@Nullable
|
||||
public String getAssignedNodeId() {
|
||||
return this.assignedNodeId;
|
||||
public DiscoveryNode getCurrentNode() {
|
||||
return currentNode;
|
||||
}
|
||||
|
||||
/** Return the unassigned info for the shard or null if the shard is assigned */
|
||||
/**
|
||||
* Returns the relocating target node, or {@code null} if the shard is not in the {@link ShardRoutingState#RELOCATING} state.
|
||||
*/
|
||||
@Nullable
|
||||
public DiscoveryNode getRelocationTargetNode() {
|
||||
return relocationTargetNode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the unassigned info for the shard, or {@code null} if the shard is active.
|
||||
*/
|
||||
@Nullable
|
||||
public UnassignedInfo getUnassignedInfo() {
|
||||
return this.unassignedInfo;
|
||||
return shardRouting.unassignedInfo();
|
||||
}
|
||||
|
||||
/** Return the configured delay before the shard can be allocated in milliseconds */
|
||||
public long getAllocationDelayMillis() {
|
||||
return this.allocationDelayMillis;
|
||||
}
|
||||
|
||||
/** Return the remaining allocation delay for this shard in milliseconds */
|
||||
public long getRemainingDelayMillis() {
|
||||
return this.remainingDelayMillis;
|
||||
}
|
||||
|
||||
/** Return a map of node to the explanation for that node */
|
||||
public Map<DiscoveryNode, NodeExplanation> getNodeExplanations() {
|
||||
return this.nodeExplanations;
|
||||
}
|
||||
|
||||
/** Return the cluster disk info for the cluster or null if none available */
|
||||
/**
|
||||
* Returns the cluster disk info for the cluster, or {@code null} if none available.
|
||||
*/
|
||||
@Nullable
|
||||
public ClusterInfo getClusterInfo() {
|
||||
return this.clusterInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the shard allocation decision for attempting to assign or move the shard.
|
||||
*/
|
||||
public ShardAllocationDecision getShardAllocationDecision() {
|
||||
return shardAllocationDecision;
|
||||
}
|
||||
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(); {
|
||||
builder.startObject("shard"); {
|
||||
builder.field("index", shard.getIndexName());
|
||||
builder.field("index_uuid", shard.getIndex().getUUID());
|
||||
builder.field("id", shard.getId());
|
||||
builder.field("primary", primary);
|
||||
builder.field("index", shardRouting.getIndexName());
|
||||
builder.field("shard", shardRouting.getId());
|
||||
builder.field("primary", shardRouting.primary());
|
||||
builder.field("current_state", shardRouting.state().toString().toLowerCase(Locale.ROOT));
|
||||
if (shardRouting.unassignedInfo() != null) {
|
||||
unassignedInfoToXContent(shardRouting.unassignedInfo(), builder);
|
||||
}
|
||||
builder.endObject(); // end shard
|
||||
builder.field("assigned", this.assignedNodeId != null);
|
||||
// If assigned, show the node id of the node it's assigned to
|
||||
if (assignedNodeId != null) {
|
||||
builder.field("assigned_node_id", this.assignedNodeId);
|
||||
}
|
||||
builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
|
||||
// If we have unassigned info, show that
|
||||
if (unassignedInfo != null) {
|
||||
unassignedInfo.toXContent(builder, params);
|
||||
builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
|
||||
builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
|
||||
}
|
||||
builder.startObject("nodes"); {
|
||||
for (NodeExplanation explanation : nodeExplanations.values()) {
|
||||
explanation.toXContent(builder, params);
|
||||
if (currentNode != null) {
|
||||
builder.startObject("current_node");
|
||||
{
|
||||
discoveryNodeToXContent(currentNode, true, builder);
|
||||
if (shardAllocationDecision.getMoveDecision().isDecisionTaken()
|
||||
&& shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) {
|
||||
builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking());
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject(); // end nodes
|
||||
if (this.clusterInfo != null) {
|
||||
builder.startObject("cluster_info"); {
|
||||
this.clusterInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endObject(); // end "cluster_info"
|
||||
}
|
||||
if (shardAllocationDecision.isDecisionTaken()) {
|
||||
shardAllocationDecision.toXContent(builder, params);
|
||||
} else {
|
||||
String explanation;
|
||||
if (shardRouting.state() == ShardRoutingState.RELOCATING) {
|
||||
explanation = "the shard is in the process of relocating from node [" + currentNode.getName() + "] " +
|
||||
"to node [" + relocationTargetNode.getName() + "], wait until relocation has completed";
|
||||
} else {
|
||||
assert shardRouting.state() == ShardRoutingState.INITIALIZING;
|
||||
explanation = "the shard is in the process of initializing on node [" + currentNode.getName() + "], " +
|
||||
"wait until initialization has completed";
|
||||
}
|
||||
builder.field("explanation", explanation);
|
||||
}
|
||||
}
|
||||
builder.endObject(); // end wrapping object
|
||||
return builder;
|
||||
}
|
||||
|
||||
/** An Enum representing the final decision for a shard allocation on a node */
|
||||
public enum FinalDecision {
|
||||
// Yes, the shard can be assigned
|
||||
YES((byte) 0),
|
||||
// No, the shard cannot be assigned
|
||||
NO((byte) 1),
|
||||
// The shard is already assigned to this node
|
||||
ALREADY_ASSIGNED((byte) 2);
|
||||
private XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder)
|
||||
throws IOException {
|
||||
|
||||
private final byte id;
|
||||
|
||||
FinalDecision (byte id) {
|
||||
this.id = id;
|
||||
builder.startObject("unassigned_info");
|
||||
builder.field("reason", unassignedInfo.getReason());
|
||||
builder.field("at", UnassignedInfo.DATE_TIME_FORMATTER.printer().print(unassignedInfo.getUnassignedTimeInMillis()));
|
||||
if (unassignedInfo.getNumFailedAllocations() > 0) {
|
||||
builder.field("failed_allocation_attempts", unassignedInfo.getNumFailedAllocations());
|
||||
}
|
||||
|
||||
private static FinalDecision fromId(byte id) {
|
||||
switch (id) {
|
||||
case 0: return YES;
|
||||
case 1: return NO;
|
||||
case 2: return ALREADY_ASSIGNED;
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
switch (id) {
|
||||
case 0: return "YES";
|
||||
case 1: return "NO";
|
||||
case 2: return "ALREADY_ASSIGNED";
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
static FinalDecision readFrom(StreamInput in) throws IOException {
|
||||
return fromId(in.readByte());
|
||||
}
|
||||
|
||||
void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeByte(id);
|
||||
}
|
||||
}
|
||||
|
||||
/** An Enum representing the state of the shard store's copy of the data on a node */
|
||||
public enum StoreCopy {
|
||||
// No data for this shard is on the node
|
||||
NONE((byte) 0),
|
||||
// A copy of the data is available on this node
|
||||
AVAILABLE((byte) 1),
|
||||
// The copy of the data on the node is corrupt
|
||||
CORRUPT((byte) 2),
|
||||
// There was an error reading this node's copy of the data
|
||||
IO_ERROR((byte) 3),
|
||||
// The copy of the data on the node is stale
|
||||
STALE((byte) 4),
|
||||
// It's unknown what the copy of the data is
|
||||
UNKNOWN((byte) 5);
|
||||
|
||||
private final byte id;
|
||||
|
||||
StoreCopy (byte id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
private static StoreCopy fromId(byte id) {
|
||||
switch (id) {
|
||||
case 0: return NONE;
|
||||
case 1: return AVAILABLE;
|
||||
case 2: return CORRUPT;
|
||||
case 3: return IO_ERROR;
|
||||
case 4: return STALE;
|
||||
case 5: return UNKNOWN;
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
switch (id) {
|
||||
case 0: return "NONE";
|
||||
case 1: return "AVAILABLE";
|
||||
case 2: return "CORRUPT";
|
||||
case 3: return "IO_ERROR";
|
||||
case 4: return "STALE";
|
||||
case 5: return "UNKNOWN";
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
static StoreCopy readFrom(StreamInput in) throws IOException {
|
||||
return fromId(in.readByte());
|
||||
}
|
||||
|
||||
void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeByte(id);
|
||||
String details = unassignedInfo.getDetails();
|
||||
if (details != null) {
|
||||
builder.field("details", details);
|
||||
}
|
||||
builder.field("last_allocation_status", AllocationDecision.fromAllocationStatus(unassignedInfo.getLastAllocationStatus()));
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
@ -1,147 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
/** The cluster allocation explanation for a single node */
|
||||
public class NodeExplanation implements Writeable, ToXContent {
|
||||
private final DiscoveryNode node;
|
||||
private final Decision nodeDecision;
|
||||
private final Float nodeWeight;
|
||||
private final IndicesShardStoresResponse.StoreStatus storeStatus;
|
||||
private final ClusterAllocationExplanation.FinalDecision finalDecision;
|
||||
private final ClusterAllocationExplanation.StoreCopy storeCopy;
|
||||
private final String finalExplanation;
|
||||
|
||||
public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
|
||||
@Nullable final IndicesShardStoresResponse.StoreStatus storeStatus,
|
||||
final ClusterAllocationExplanation.FinalDecision finalDecision,
|
||||
final String finalExplanation,
|
||||
final ClusterAllocationExplanation.StoreCopy storeCopy) {
|
||||
this.node = node;
|
||||
this.nodeDecision = nodeDecision;
|
||||
this.nodeWeight = nodeWeight;
|
||||
this.storeStatus = storeStatus;
|
||||
this.finalDecision = finalDecision;
|
||||
this.finalExplanation = finalExplanation;
|
||||
this.storeCopy = storeCopy;
|
||||
}
|
||||
|
||||
public NodeExplanation(StreamInput in) throws IOException {
|
||||
this.node = new DiscoveryNode(in);
|
||||
this.nodeDecision = Decision.readFrom(in);
|
||||
this.nodeWeight = in.readFloat();
|
||||
if (in.readBoolean()) {
|
||||
this.storeStatus = IndicesShardStoresResponse.StoreStatus.readStoreStatus(in);
|
||||
} else {
|
||||
this.storeStatus = null;
|
||||
}
|
||||
this.finalDecision = ClusterAllocationExplanation.FinalDecision.readFrom(in);
|
||||
this.finalExplanation = in.readString();
|
||||
this.storeCopy = ClusterAllocationExplanation.StoreCopy.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
node.writeTo(out);
|
||||
nodeDecision.writeTo(out);
|
||||
out.writeFloat(nodeWeight);
|
||||
if (storeStatus == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
storeStatus.writeTo(out);
|
||||
}
|
||||
finalDecision.writeTo(out);
|
||||
out.writeString(finalExplanation);
|
||||
storeCopy.writeTo(out);
|
||||
}
|
||||
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(node.getId()); {
|
||||
builder.field("node_name", node.getName());
|
||||
builder.startObject("node_attributes"); {
|
||||
for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
|
||||
builder.field(attrEntry.getKey(), attrEntry.getValue());
|
||||
}
|
||||
}
|
||||
builder.endObject(); // end attributes
|
||||
builder.startObject("store"); {
|
||||
builder.field("shard_copy", storeCopy.toString());
|
||||
if (storeStatus != null) {
|
||||
final Throwable storeErr = storeStatus.getStoreException();
|
||||
if (storeErr != null) {
|
||||
builder.field("store_exception", ExceptionsHelper.detailedMessage(storeErr));
|
||||
}
|
||||
}
|
||||
}
|
||||
builder.endObject(); // end store
|
||||
builder.field("final_decision", finalDecision.toString());
|
||||
builder.field("final_explanation", finalExplanation);
|
||||
builder.field("weight", nodeWeight);
|
||||
builder.startArray("decisions");
|
||||
nodeDecision.toXContent(builder, params);
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject(); // end node <uuid>
|
||||
return builder;
|
||||
}
|
||||
|
||||
public DiscoveryNode getNode() {
|
||||
return this.node;
|
||||
}
|
||||
|
||||
public Decision getDecision() {
|
||||
return this.nodeDecision;
|
||||
}
|
||||
|
||||
public Float getWeight() {
|
||||
return this.nodeWeight;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public IndicesShardStoresResponse.StoreStatus getStoreStatus() {
|
||||
return this.storeStatus;
|
||||
}
|
||||
|
||||
public ClusterAllocationExplanation.FinalDecision getFinalDecision() {
|
||||
return this.finalDecision;
|
||||
}
|
||||
|
||||
public String getFinalExplanation() {
|
||||
return this.finalExplanation;
|
||||
}
|
||||
|
||||
public ClusterAllocationExplanation.StoreCopy getStoreCopy() {
|
||||
return this.storeCopy;
|
||||
}
|
||||
}
|
@@ -19,13 +19,7 @@
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
|
||||
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.cluster.ClusterInfo;
|
||||
@@ -33,34 +27,25 @@ import org.elasticsearch.cluster.ClusterInfoService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode;
|
||||
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.gateway.GatewayAllocator;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
|
||||
|
||||
/**
|
||||
* The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
|
||||
@@ -72,7 +57,6 @@ public class TransportClusterAllocationExplainAction
|
||||
private final ClusterInfoService clusterInfoService;
|
||||
private final AllocationDeciders allocationDeciders;
|
||||
private final ShardsAllocator shardAllocator;
|
||||
private final TransportIndicesShardStoresAction shardStoresAction;
|
||||
private final GatewayAllocator gatewayAllocator;
|
||||
|
||||
@Inject
|
||||
@@ -80,14 +64,12 @@ public class TransportClusterAllocationExplainAction
|
||||
ThreadPool threadPool, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
|
||||
ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction,
|
||||
GatewayAllocator gatewayAllocator) {
|
||||
ShardsAllocator shardAllocator, GatewayAllocator gatewayAllocator) {
|
||||
super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
|
||||
indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
|
||||
this.clusterInfoService = clusterInfoService;
|
||||
this.allocationDeciders = allocationDeciders;
|
||||
this.shardAllocator = shardAllocator;
|
||||
this.shardStoresAction = shardStoresAction;
|
||||
this.gatewayAllocator = gatewayAllocator;
|
||||
}
|
||||
|
||||
@@ -106,172 +88,6 @@ public class TransportClusterAllocationExplainAction
|
||||
return new ClusterAllocationExplainResponse();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the decisions for the given {@code ShardRouting} on the given {@code RoutingNode}. If {@code includeYesDecisions} is not true,
|
||||
* only non-YES (NO and THROTTLE) decisions are returned.
|
||||
*/
|
||||
public static Decision tryShardOnNode(ShardRouting shard, RoutingNode node, RoutingAllocation allocation, boolean includeYesDecisions) {
|
||||
Decision d = allocation.deciders().canAllocate(shard, node, allocation);
|
||||
if (includeYesDecisions) {
|
||||
return d;
|
||||
} else {
|
||||
Decision.Multi nonYesDecisions = new Decision.Multi();
|
||||
List<Decision> decisions = d.getDecisions();
|
||||
for (Decision decision : decisions) {
|
||||
if (decision.type() != Decision.Type.YES) {
|
||||
nonYesDecisions.add(decision);
|
||||
}
|
||||
}
|
||||
return nonYesDecisions;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct a {@code WeightedDecision} object for the given shard given all the metadata. This also attempts to construct the human
|
||||
* readable FinalDecision and final explanation as part of the explanation.
|
||||
*/
|
||||
public static NodeExplanation calculateNodeExplanation(ShardRouting shard,
|
||||
IndexMetaData indexMetaData,
|
||||
DiscoveryNode node,
|
||||
Decision nodeDecision,
|
||||
Float nodeWeight,
|
||||
IndicesShardStoresResponse.StoreStatus storeStatus,
|
||||
String assignedNodeId,
|
||||
Set<String> activeAllocationIds,
|
||||
boolean hasPendingAsyncFetch) {
|
||||
final ClusterAllocationExplanation.FinalDecision finalDecision;
|
||||
final ClusterAllocationExplanation.StoreCopy storeCopy;
|
||||
final String finalExplanation;
|
||||
|
||||
if (storeStatus == null) {
|
||||
// No copies of the data
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.NONE;
|
||||
} else {
|
||||
final Exception storeErr = storeStatus.getStoreException();
|
||||
if (storeErr != null) {
|
||||
if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) {
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT;
|
||||
} else {
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.IO_ERROR;
|
||||
}
|
||||
} else if (activeAllocationIds.isEmpty()) {
|
||||
// The ids are only empty if dealing with a legacy index
|
||||
// TODO: fetch the shard state versions and display here?
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.UNKNOWN;
|
||||
} else if (activeAllocationIds.contains(storeStatus.getAllocationId())) {
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.AVAILABLE;
|
||||
} else {
|
||||
// Otherwise, this is a stale copy of the data (allocation ids don't match)
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.STALE;
|
||||
}
|
||||
}
|
||||
|
||||
if (node.getId().equals(assignedNodeId)) {
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
|
||||
finalExplanation = "the shard is already assigned to this node";
|
||||
} else if (shard.unassigned() && shard.primary() == false &&
|
||||
shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && nodeDecision.type() != Decision.Type.YES) {
|
||||
finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
|
||||
" decision";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.unassigned() && shard.primary() == false &&
|
||||
shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && hasPendingAsyncFetch) {
|
||||
finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() &&
|
||||
(shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE ||
|
||||
shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT)
|
||||
&& hasPendingAsyncFetch) {
|
||||
finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
|
||||
finalExplanation = "the copy of the shard is stale, allocation ids do not match";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) {
|
||||
finalExplanation = "there is no copy of the shard available";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
|
||||
finalExplanation = "the copy of the shard is corrupt";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
|
||||
finalExplanation = "the copy of the shard cannot be read";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else {
|
||||
if (nodeDecision.type() == Decision.Type.NO) {
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision";
|
||||
} else {
|
||||
// TODO: handle throttling decision better here
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
|
||||
if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
|
||||
finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";
|
||||
} else {
|
||||
finalExplanation = "the shard can be assigned";
|
||||
}
|
||||
}
|
||||
}
|
||||
return new NodeExplanation(node, nodeDecision, nodeWeight, storeStatus, finalDecision, finalExplanation, storeCopy);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code
|
||||
* includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions.
|
||||
*/
|
||||
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
|
||||
boolean includeYesDecisions, ShardsAllocator shardAllocator,
|
||||
List<IndicesShardStoresResponse.StoreStatus> shardStores,
|
||||
GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) {
|
||||
// don't short circuit deciders, we want a full explanation
|
||||
allocation.debugDecision(true);
|
||||
// get the existing unassigned info if available
|
||||
UnassignedInfo ui = shard.unassignedInfo();
|
||||
|
||||
Map<DiscoveryNode, Decision> nodeToDecision = new HashMap<>();
|
||||
for (RoutingNode node : routingNodes) {
|
||||
DiscoveryNode discoNode = node.node();
|
||||
if (discoNode.isDataNode()) {
|
||||
Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions);
|
||||
nodeToDecision.put(discoNode, d);
|
||||
}
|
||||
}
|
||||
long remainingDelayMillis = 0;
|
||||
final MetaData metadata = allocation.metaData();
|
||||
final IndexMetaData indexMetaData = metadata.index(shard.index());
|
||||
long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis();
|
||||
if (ui != null && ui.isDelayed()) {
|
||||
long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings());
|
||||
remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis();
|
||||
}
|
||||
|
||||
// Calculate weights for each of the nodes
|
||||
Map<DiscoveryNode, Float> weights = shardAllocator.weighShard(allocation, shard);
|
||||
|
||||
Map<DiscoveryNode, IndicesShardStoresResponse.StoreStatus> nodeToStatus = new HashMap<>(shardStores.size());
|
||||
for (IndicesShardStoresResponse.StoreStatus status : shardStores) {
|
||||
nodeToStatus.put(status.getNode(), status);
|
||||
}
|
||||
|
||||
Map<DiscoveryNode, NodeExplanation> explanations = new HashMap<>(shardStores.size());
|
||||
for (Map.Entry<DiscoveryNode, Decision> entry : nodeToDecision.entrySet()) {
|
||||
DiscoveryNode node = entry.getKey();
|
||||
Decision decision = entry.getValue();
|
||||
Float weight = weights.get(node);
|
||||
IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
|
||||
NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
|
||||
storeStatus, shard.currentNodeId(), indexMetaData.inSyncAllocationIds(shard.getId()),
|
||||
allocation.hasPendingAsyncFetch());
|
||||
explanations.put(node, nodeExplanation);
|
||||
}
|
||||
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
|
||||
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
|
||||
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
|
||||
final ActionListener<ClusterAllocationExplainResponse> listener) {
|
||||
@@ -280,66 +96,105 @@ public class TransportClusterAllocationExplainAction
|
||||
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
|
||||
clusterInfo, System.nanoTime(), false);
|
||||
|
||||
ShardRouting shardRouting = findShardToExplain(request, allocation);
|
||||
logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);
|
||||
|
||||
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation,
|
||||
request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), gatewayAllocator, shardAllocator);
|
||||
listener.onResponse(new ClusterAllocationExplainResponse(cae));
|
||||
}
|
||||
|
||||
// public for testing
|
||||
public static ClusterAllocationExplanation explainShard(ShardRouting shardRouting, RoutingAllocation allocation,
|
||||
ClusterInfo clusterInfo, boolean includeYesDecisions,
|
||||
GatewayAllocator gatewayAllocator, ShardsAllocator shardAllocator) {
|
||||
allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS);
|
||||
|
||||
ShardAllocationDecision shardDecision;
|
||||
if (shardRouting.initializing() || shardRouting.relocating()) {
|
||||
shardDecision = ShardAllocationDecision.NOT_TAKEN;
|
||||
} else {
|
||||
AllocateUnassignedDecision allocateDecision = shardRouting.unassigned() ?
|
||||
gatewayAllocator.decideUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN;
|
||||
if (allocateDecision.isDecisionTaken() == false) {
|
||||
shardDecision = shardAllocator.decideShardAllocation(shardRouting, allocation);
|
||||
} else {
|
||||
shardDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
|
||||
}
|
||||
}
|
||||
|
||||
return new ClusterAllocationExplanation(shardRouting,
|
||||
shardRouting.currentNodeId() != null ? allocation.nodes().get(shardRouting.currentNodeId()) : null,
|
||||
shardRouting.relocatingNodeId() != null ? allocation.nodes().get(shardRouting.relocatingNodeId()) : null,
|
||||
clusterInfo, shardDecision);
|
||||
}
|
||||
|
||||
// public for testing
|
||||
public static ShardRouting findShardToExplain(ClusterAllocationExplainRequest request, RoutingAllocation allocation) {
|
||||
ShardRouting foundShard = null;
|
||||
if (request.useAnyUnassignedShard()) {
|
||||
// If we can use any shard, just pick the first unassigned one (if there are any)
|
||||
RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator();
|
||||
RoutingNodes.UnassignedShards.UnassignedIterator ui = allocation.routingNodes().unassigned().iterator();
|
||||
if (ui.hasNext()) {
|
||||
foundShard = ui.next();
|
||||
}
|
||||
if (foundShard == null) {
|
||||
throw new IllegalStateException("unable to find any unassigned shards to explain [" + request + "]");
|
||||
}
|
||||
} else {
|
||||
String index = request.getIndex();
|
||||
int shard = request.getShard();
|
||||
if (request.isPrimary()) {
|
||||
// If we're looking for the primary shard, there's only one copy, so pick it directly
|
||||
foundShard = allocation.routingTable().shardRoutingTable(index, shard).primaryShard();
|
||||
if (request.getCurrentNode() != null) {
|
||||
DiscoveryNode primaryNode = allocation.nodes().resolveNode(request.getCurrentNode());
|
||||
// the primary is assigned to a node other than the node specified in the request
|
||||
if (primaryNode.getId().equals(foundShard.currentNodeId()) == false) {
|
||||
throw new IllegalStateException("unable to find primary shard assigned to node [" + request.getCurrentNode() + "]");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If looking for a replica, go through all the replica shards
|
||||
List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards();
|
||||
if (replicaShardRoutings.size() > 0) {
|
||||
// Pick the first replica at the very least
|
||||
foundShard = replicaShardRoutings.get(0);
|
||||
// In case there are multiple replicas where some are assigned and some aren't,
|
||||
// try to find one that is unassigned at least
|
||||
if (request.getCurrentNode() != null) {
|
||||
// the request is to explain a replica shard already assigned on a particular node,
|
||||
// so find that shard copy
|
||||
DiscoveryNode replicaNode = allocation.nodes().resolveNode(request.getCurrentNode());
|
||||
for (ShardRouting replica : replicaShardRoutings) {
|
||||
if (replica.unassigned()) {
|
||||
if (replicaNode.getId().equals(replica.currentNodeId())) {
|
||||
foundShard = replica;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (foundShard == null) {
|
||||
throw new IllegalStateException("unable to find a replica shard assigned to node [" +
|
||||
request.getCurrentNode() + "]");
|
||||
}
|
||||
} else {
|
||||
if (replicaShardRoutings.size() > 0) {
|
||||
// Pick the first replica at the very least
|
||||
foundShard = replicaShardRoutings.get(0);
|
||||
for (ShardRouting replica : replicaShardRoutings) {
|
||||
// In case there are multiple replicas where some are assigned and some aren't,
|
||||
// try to find one that is unassigned at least
|
||||
if (replica.unassigned()) {
|
||||
foundShard = replica;
|
||||
break;
|
||||
} else if (replica.started() && (foundShard.initializing() || foundShard.relocating())) {
|
||||
// prefer started shards to initializing or relocating shards because started shards
|
||||
// can be explained
|
||||
foundShard = replica;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (foundShard == null) {
|
||||
listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request));
|
||||
return;
|
||||
throw new IllegalStateException("unable to find any shards to explain [" + request + "] in the routing table");
|
||||
}
|
||||
final ShardRouting shardRouting = foundShard;
|
||||
logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);
|
||||
|
||||
getShardStores(shardRouting, new ActionListener<IndicesShardStoresResponse>() {
|
||||
@Override
|
||||
public void onResponse(IndicesShardStoresResponse shardStoreResponse) {
|
||||
ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses =
|
||||
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
|
||||
List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
|
||||
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
|
||||
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator,
|
||||
request.includeDiskInfo() ? clusterInfo : null);
|
||||
listener.onResponse(new ClusterAllocationExplainResponse(cae));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private void getShardStores(ShardRouting shard, final ActionListener<IndicesShardStoresResponse> listener) {
|
||||
IndicesShardStoresRequest request = new IndicesShardStoresRequest(shard.getIndexName());
|
||||
request.shardStatuses("all");
|
||||
shardStoresAction.execute(request, listener);
|
||||
return foundShard;
|
||||
}
|
||||
}
|
||||
|
@@ -32,6 +32,7 @@ import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
@@ -139,7 +140,7 @@ public abstract class AbstractAllocationDecision implements ToXContent, Writeabl
|
||||
public XContentBuilder nodeDecisionsToXContent(List<NodeAllocationResult> nodeDecisions, XContentBuilder builder, Params params)
|
||||
throws IOException {
|
||||
|
||||
if (nodeDecisions != null) {
|
||||
if (nodeDecisions != null && nodeDecisions.isEmpty() == false) {
|
||||
builder.startArray("node_allocation_decisions");
|
||||
{
|
||||
for (NodeAllocationResult explanation : nodeDecisions) {
|
||||
@@ -166,4 +167,21 @@ public abstract class AbstractAllocationDecision implements ToXContent, Writeabl
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (this == other) {
|
||||
return true;
|
||||
}
|
||||
if (other == null || other instanceof AbstractAllocationDecision == false) {
|
||||
return false;
|
||||
}
|
||||
@SuppressWarnings("unchecked") AbstractAllocationDecision that = (AbstractAllocationDecision) other;
|
||||
return Objects.equals(targetNode, that.targetNode) && Objects.equals(nodeDecisions, that.nodeDecisions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(targetNode, nodeDecisions);
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -174,7 +174,7 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision {
|
||||
|
||||
@Override
|
||||
public boolean isDecisionTaken() {
|
||||
return this != NOT_TAKEN;
|
||||
return allocationStatus != AllocationStatus.NO_ATTEMPT;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -238,35 +238,34 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision {
|
||||
@Override
|
||||
public String getExplanation() {
|
||||
checkDecisionState();
|
||||
String explanation;
|
||||
if (allocationStatus == null) {
|
||||
explanation = "can allocate the shard";
|
||||
} else if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) {
|
||||
explanation = "allocation temporarily throttled";
|
||||
} else if (allocationStatus == AllocationStatus.FETCHING_SHARD_DATA) {
|
||||
explanation = "cannot allocate because information about existing shard data is still being retrieved from " +
|
||||
"some of the nodes";
|
||||
} else if (allocationStatus == AllocationStatus.NO_VALID_SHARD_COPY) {
|
||||
if (getNodeDecisions() != null && getNodeDecisions().size() > 0) {
|
||||
explanation = "cannot allocate because all existing copies of the shard are unreadable";
|
||||
AllocationDecision allocationDecision = getAllocationDecision();
|
||||
if (allocationDecision == AllocationDecision.YES) {
|
||||
return "can allocate the shard";
|
||||
} else if (allocationDecision == AllocationDecision.THROTTLED) {
|
||||
return "allocation temporarily throttled";
|
||||
} else if (allocationDecision == AllocationDecision.AWAITING_INFO) {
|
||||
return "cannot allocate because information about existing shard data is still being retrieved from some of the nodes";
|
||||
} else if (allocationDecision == AllocationDecision.NO_VALID_SHARD_COPY) {
|
||||
if (getNodeDecisions() != null && getNodeDecisions().isEmpty() == false) {
|
||||
return "cannot allocate because all found copies of the shard are either stale or corrupt";
|
||||
} else {
|
||||
explanation = "cannot allocate because a previous copy of the shard existed but could not be found";
|
||||
return "cannot allocate because a previous copy of the primary shard existed but can no longer be found on " +
|
||||
"the nodes in the cluster";
|
||||
}
|
||||
} else if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) {
|
||||
explanation = "cannot allocate because the cluster is still waiting " +
|
||||
} else if (allocationDecision == AllocationDecision.ALLOCATION_DELAYED) {
|
||||
return "cannot allocate because the cluster is still waiting " +
|
||||
TimeValue.timeValueMillis(remainingDelayInMillis) +
|
||||
" for the departed node holding a replica to rejoin" +
|
||||
(atLeastOneNodeWithYesDecision() ?
|
||||
", despite being allowed to allocate the shard to at least one other node" : "");
|
||||
} else {
|
||||
assert allocationStatus == AllocationStatus.DECIDERS_NO;
|
||||
assert allocationDecision == AllocationDecision.NO;
|
||||
if (reuseStore) {
|
||||
explanation = "cannot allocate because allocation is not permitted to any of the nodes that hold an in-sync shard copy";
|
||||
return "cannot allocate because allocation is not permitted to any of the nodes that hold an in-sync shard copy";
|
||||
} else {
|
||||
explanation = "cannot allocate because allocation is not permitted to any of the nodes";
|
||||
return "cannot allocate because allocation is not permitted to any of the nodes";
|
||||
}
|
||||
}
|
||||
return explanation;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -300,4 +299,26 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision {
|
||||
out.writeVLong(configuredDelayInMillis);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (super.equals(other) == false) {
|
||||
return false;
|
||||
}
|
||||
if (other instanceof AllocateUnassignedDecision == false) {
|
||||
return false;
|
||||
}
|
||||
@SuppressWarnings("unchecked") AllocateUnassignedDecision that = (AllocateUnassignedDecision) other;
|
||||
return Objects.equals(allocationStatus, that.allocationStatus)
|
||||
&& Objects.equals(allocationId, that.allocationId)
|
||||
&& reuseStore == that.reuseStore
|
||||
&& configuredDelayInMillis == that.configuredDelayInMillis
|
||||
&& remainingDelayInMillis == that.remainingDelayInMillis;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return 31 * super.hashCode() + Objects.hash(allocationStatus, allocationId, reuseStore,
|
||||
configuredDelayInMillis, remainingDelayInMillis);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ public enum AllocationDecision implements Writeable {
|
||||
/**
|
||||
* The allocation attempt was throttled for the shard.
|
||||
*/
|
||||
THROTTLE((byte) 1),
|
||||
THROTTLED((byte) 1),
|
||||
/**
|
||||
* The shard cannot be allocated, which can happen for any number of reasons,
|
||||
* including the allocation deciders gave a NO decision for allocating.
|
||||
@ -56,12 +56,12 @@ public enum AllocationDecision implements Writeable {
|
||||
* Waiting on getting shard data from all nodes before making a decision
|
||||
* about where to allocate the shard.
|
||||
*/
|
||||
FETCH_PENDING((byte) 4),
|
||||
AWAITING_INFO((byte) 4),
|
||||
/**
|
||||
* The allocation decision has been delayed waiting for a replica with a shard copy
|
||||
* that left the cluster to rejoin.
|
||||
*/
|
||||
DELAYED_ALLOCATION((byte) 5),
|
||||
ALLOCATION_DELAYED((byte) 5),
|
||||
/**
|
||||
* The shard was denied allocation because there were no valid shard copies
|
||||
* found for it amongst the nodes in the cluster.
|
||||
@ -90,15 +90,15 @@ public enum AllocationDecision implements Writeable {
|
||||
case 0:
|
||||
return YES;
|
||||
case 1:
|
||||
return THROTTLE;
|
||||
return THROTTLED;
|
||||
case 2:
|
||||
return NO;
|
||||
case 3:
|
||||
return WORSE_BALANCE;
|
||||
case 4:
|
||||
return FETCH_PENDING;
|
||||
return AWAITING_INFO;
|
||||
case 5:
|
||||
return DELAYED_ALLOCATION;
|
||||
return ALLOCATION_DELAYED;
|
||||
case 6:
|
||||
return NO_VALID_SHARD_COPY;
|
||||
case 7:
|
||||
@ -117,11 +117,11 @@ public enum AllocationDecision implements Writeable {
|
||||
} else {
|
||||
switch (allocationStatus) {
|
||||
case DECIDERS_THROTTLED:
|
||||
return THROTTLE;
|
||||
return THROTTLED;
|
||||
case FETCHING_SHARD_DATA:
|
||||
return FETCH_PENDING;
|
||||
return AWAITING_INFO;
|
||||
case DELAYED_ALLOCATION:
|
||||
return DELAYED_ALLOCATION;
|
||||
return ALLOCATION_DELAYED;
|
||||
case NO_VALID_SHARD_COPY:
|
||||
return NO_VALID_SHARD_COPY;
|
||||
case NO_ATTEMPT:
|
||||
@ -141,7 +141,7 @@ public enum AllocationDecision implements Writeable {
|
||||
case YES:
|
||||
return YES;
|
||||
case THROTTLE:
|
||||
return THROTTLE;
|
||||
return THROTTLED;
|
||||
default:
|
||||
assert type == Decision.Type.NO;
|
||||
return NO;
|
||||
|
@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Represents a decision to move a started shard, either because it is no longer allowed to remain on its current node
|
||||
@ -48,15 +49,15 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
@Nullable
|
||||
private final Decision canRemainDecision;
|
||||
@Nullable
|
||||
private final Decision canRebalanceDecision;
|
||||
private final Decision clusterRebalanceDecision;
|
||||
private final int currentNodeRanking;
|
||||
|
||||
private MoveDecision(Decision canRemainDecision, Decision canRebalanceDecision, AllocationDecision allocationDecision,
|
||||
private MoveDecision(Decision canRemainDecision, Decision clusterRebalanceDecision, AllocationDecision allocationDecision,
|
||||
DiscoveryNode assignedNode, List<NodeAllocationResult> nodeDecisions, int currentNodeRanking) {
|
||||
super(assignedNode, nodeDecisions);
|
||||
this.allocationDecision = allocationDecision;
|
||||
this.canRemainDecision = canRemainDecision;
|
||||
this.canRebalanceDecision = canRebalanceDecision;
|
||||
this.clusterRebalanceDecision = clusterRebalanceDecision;
|
||||
this.currentNodeRanking = currentNodeRanking;
|
||||
}
|
||||
|
||||
@ -64,7 +65,7 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
super(in);
|
||||
allocationDecision = in.readOptionalWriteable(AllocationDecision::readFrom);
|
||||
canRemainDecision = in.readOptionalWriteable(Decision::readFrom);
|
||||
canRebalanceDecision = in.readOptionalWriteable(Decision::readFrom);
|
||||
clusterRebalanceDecision = in.readOptionalWriteable(Decision::readFrom);
|
||||
currentNodeRanking = in.readVInt();
|
||||
}
|
||||
|
||||
@ -73,7 +74,7 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
super.writeTo(out);
|
||||
out.writeOptionalWriteable(allocationDecision);
|
||||
out.writeOptionalWriteable(canRemainDecision);
|
||||
out.writeOptionalWriteable(canRebalanceDecision);
|
||||
out.writeOptionalWriteable(clusterRebalanceDecision);
|
||||
out.writeVInt(currentNodeRanking);
|
||||
}
|
||||
|
||||
@ -131,7 +132,15 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
|
||||
@Override
|
||||
public boolean isDecisionTaken() {
|
||||
return this != NOT_TAKEN;
|
||||
return canRemainDecision != null || clusterRebalanceDecision != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new move decision from this decision, plus adding a remain decision.
|
||||
*/
|
||||
public MoveDecision withRemainDecision(Decision canRemainDecision) {
|
||||
return new MoveDecision(canRemainDecision, clusterRebalanceDecision, allocationDecision,
|
||||
targetNode, nodeDecisions, currentNodeRanking);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -164,13 +173,13 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
|
||||
/**
|
||||
* Returns {@code true} if the shard is allowed to be rebalanced to another node in the cluster,
|
||||
* returns {@code false} otherwise. If {@link #getCanRebalanceDecision()} returns {@code null}, then
|
||||
* returns {@code false} otherwise. If {@link #getClusterRebalanceDecision()} returns {@code null}, then
|
||||
* the result of this method is meaningless, as no rebalance decision was taken. If {@link #isDecisionTaken()}
|
||||
* returns {@code false}, then invoking this method will throw an {@code IllegalStateException}.
|
||||
*/
|
||||
public boolean canRebalance() {
|
||||
public boolean canRebalanceCluster() {
|
||||
checkDecisionState();
|
||||
return canRebalanceDecision.type() == Type.YES;
|
||||
return clusterRebalanceDecision != null && clusterRebalanceDecision.type() == Type.YES;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -182,9 +191,9 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
* {@code IllegalStateException}.
|
||||
*/
|
||||
@Nullable
|
||||
public Decision getCanRebalanceDecision() {
|
||||
public Decision getClusterRebalanceDecision() {
|
||||
checkDecisionState();
|
||||
return canRebalanceDecision;
|
||||
return clusterRebalanceDecision;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -199,7 +208,7 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
/**
|
||||
* Gets the current ranking of the node to which the shard is currently assigned, relative to the
|
||||
* other nodes in the cluster as reported in {@link NodeAllocationResult#getWeightRanking()}. The
|
||||
* ranking will only return a meaningful positive integer if {@link #getCanRebalanceDecision()} returns
|
||||
* ranking will only return a meaningful positive integer if {@link #getClusterRebalanceDecision()} returns
|
||||
* a non-null value; otherwise, 0 will be returned. If {@link #isDecisionTaken()} returns
|
||||
* {@code false}, then invoking this method will throw an {@code IllegalStateException}.
|
||||
*/
|
||||
@ -212,18 +221,19 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
public String getExplanation() {
|
||||
checkDecisionState();
|
||||
String explanation;
|
||||
if (canRebalanceDecision != null) {
|
||||
if (clusterRebalanceDecision != null) {
|
||||
// it was a decision to rebalance the shard, because the shard was allowed to remain on its current node
|
||||
if (allocationDecision == AllocationDecision.FETCH_PENDING) {
|
||||
if (allocationDecision == AllocationDecision.AWAITING_INFO) {
|
||||
explanation = "cannot rebalance as information about existing copies of this shard in the cluster is still being gathered";
|
||||
} else if (canRebalanceDecision.type() == Type.NO) {
|
||||
explanation = "rebalancing is not allowed on the cluster" + (atLeastOneNodeWithYesDecision() ? ", even though there " +
|
||||
} else if (clusterRebalanceDecision.type() == Type.NO) {
|
||||
explanation = "rebalancing is not allowed" + (atLeastOneNodeWithYesDecision() ? ", even though there " +
|
||||
"is at least one node on which the shard can be allocated" : "");
|
||||
} else if (canRebalanceDecision.type() == Type.THROTTLE) {
|
||||
} else if (clusterRebalanceDecision.type() == Type.THROTTLE) {
|
||||
explanation = "rebalancing is throttled";
|
||||
} else {
|
||||
assert clusterRebalanceDecision.type() == Type.YES;
|
||||
if (getTargetNode() != null) {
|
||||
if (allocationDecision == AllocationDecision.THROTTLE) {
|
||||
if (allocationDecision == AllocationDecision.THROTTLED) {
|
||||
explanation = "shard rebalancing throttled";
|
||||
} else {
|
||||
explanation = "can rebalance shard";
|
||||
@ -235,11 +245,10 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
}
|
||||
} else {
|
||||
// it was a decision to force move the shard
|
||||
if (canRemain()) {
|
||||
explanation = "shard can remain on its current node";
|
||||
} else if (allocationDecision == AllocationDecision.YES) {
|
||||
assert canRemain() == false;
|
||||
if (allocationDecision == AllocationDecision.YES) {
|
||||
explanation = "shard cannot remain on this node and is force-moved to another node";
|
||||
} else if (allocationDecision == AllocationDecision.THROTTLE) {
|
||||
} else if (allocationDecision == AllocationDecision.THROTTLED) {
|
||||
explanation = "shard cannot remain on this node but is throttled on moving to another node";
|
||||
} else {
|
||||
assert allocationDecision == AllocationDecision.NO;
|
||||
@ -263,23 +272,44 @@ public final class MoveDecision extends AbstractAllocationDecision {
|
||||
canRemainDecision.toXContent(builder, params);
|
||||
builder.endArray();
|
||||
}
|
||||
if (canRebalanceDecision != null) {
|
||||
AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(canRebalanceDecision.type());
|
||||
if (clusterRebalanceDecision != null) {
|
||||
AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(clusterRebalanceDecision.type());
|
||||
builder.field("can_rebalance_cluster", rebalanceDecision);
|
||||
if (rebalanceDecision != AllocationDecision.YES && canRebalanceDecision.getDecisions().isEmpty() == false) {
|
||||
if (rebalanceDecision != AllocationDecision.YES && clusterRebalanceDecision.getDecisions().isEmpty() == false) {
|
||||
builder.startArray("can_rebalance_cluster_decisions");
|
||||
canRebalanceDecision.toXContent(builder, params);
|
||||
clusterRebalanceDecision.toXContent(builder, params);
|
||||
builder.endArray();
|
||||
}
|
||||
}
|
||||
if (canRebalanceDecision != null) {
|
||||
if (clusterRebalanceDecision != null) {
|
||||
builder.field("can_rebalance_to_other_node", allocationDecision);
|
||||
builder.field("rebalance_explanation", getExplanation());
|
||||
} else {
|
||||
builder.field("can_move_to_other_node", forceMove() ? "yes" : "no");
|
||||
builder.field("move_explanation", getExplanation());
|
||||
}
|
||||
builder.field(canRebalanceDecision != null ? "rebalance_explanation" : "move_explanation", getExplanation());
|
||||
nodeDecisionsToXContent(nodeDecisions, builder, params);
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (super.equals(other) == false) {
|
||||
return false;
|
||||
}
|
||||
if (other instanceof MoveDecision == false) {
|
||||
return false;
|
||||
}
|
||||
@SuppressWarnings("unchecked") MoveDecision that = (MoveDecision) other;
|
||||
return Objects.equals(allocationDecision, that.allocationDecision)
|
||||
&& Objects.equals(canRemainDecision, that.canRemainDecision)
|
||||
&& Objects.equals(clusterRebalanceDecision, that.clusterRebalanceDecision)
|
||||
&& currentNodeRanking == that.currentNodeRanking;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return 31 * super.hashCode() + Objects.hash(allocationDecision, canRemainDecision, clusterRebalanceDecision, currentNodeRanking);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -68,7 +68,7 @@ public class RoutingAllocation {

private final boolean retryFailed;

private boolean debugDecision = false;
private DebugMode debugDecision = DebugMode.OFF;

private boolean hasPendingAsyncFetch = false;

@ -167,11 +167,19 @@ public class RoutingAllocation {
return this.ignoreDisable;
}

public void debugDecision(boolean debug) {
public void setDebugMode(DebugMode debug) {
this.debugDecision = debug;
}

public void debugDecision(boolean debug) {
this.debugDecision = debug ? DebugMode.ON : DebugMode.OFF;
}

public boolean debugDecision() {
return this.debugDecision != DebugMode.OFF;
}

public DebugMode getDebugMode() {
return this.debugDecision;
}

@ -280,4 +288,20 @@ public class RoutingAllocation {
public boolean isRetryFailed() {
return retryFailed;
}

public enum DebugMode {
/**
* debug mode is off
*/
OFF,
/**
* debug mode is on
*/
ON,
/**
* debug mode is on, but YES decisions from a {@link org.elasticsearch.cluster.routing.allocation.decider.Decision.Multi}
* are not included.
*/
EXCLUDE_YES_DECISIONS
}
}
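Note (illustrative, not part of the change): a minimal sketch of how an explain-style caller might drive the new debug modes. Only setDebugMode, debugDecision() and the DebugMode constants come from the code above; the helper class and method name are assumptions.

    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode;

    class ExplainModeHelper {
        // Turn on decision debugging; EXCLUDE_YES_DECISIONS keeps debug output but
        // drops YES decisions contributed by a Decision.Multi.
        static void enableExplainMode(RoutingAllocation allocation, boolean includeYesDecisions) {
            allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS);
            assert allocation.debugDecision(); // any mode other than OFF counts as debugging enabled
        }
    }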
@ -0,0 +1,105 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

/**
* Represents the decision taken for the allocation of a single shard. If
* the shard is unassigned, {@link #getAllocateDecision()} will return an
* object containing the decision and its explanation, and {@link #getMoveDecision()}
* will return an object for which {@link MoveDecision#isDecisionTaken()} returns
* {@code false}. If the shard is in the started state, then {@link #getMoveDecision()}
* will return an object containing the decision to move/rebalance the shard, and
* {@link #getAllocateDecision()} will return an object for which
* {@link AllocateUnassignedDecision#isDecisionTaken()} returns {@code false}. If
* the shard is neither unassigned nor started (i.e. it is initializing or relocating),
* then both {@link #getAllocateDecision()} and {@link #getMoveDecision()} will return
* objects whose {@code isDecisionTaken()} method returns {@code false}.
*/
public final class ShardAllocationDecision implements ToXContent, Writeable {
public static final ShardAllocationDecision NOT_TAKEN =
new ShardAllocationDecision(AllocateUnassignedDecision.NOT_TAKEN, MoveDecision.NOT_TAKEN);

private final AllocateUnassignedDecision allocateDecision;
private final MoveDecision moveDecision;

public ShardAllocationDecision(AllocateUnassignedDecision allocateDecision,
MoveDecision moveDecision) {
this.allocateDecision = allocateDecision;
this.moveDecision = moveDecision;
}

public ShardAllocationDecision(StreamInput in) throws IOException {
allocateDecision = new AllocateUnassignedDecision(in);
moveDecision = new MoveDecision(in);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
allocateDecision.writeTo(out);
moveDecision.writeTo(out);
}

/**
* Returns {@code true} if either an allocation decision or a move decision was taken
* for the shard. If no decision was taken, as in the case of initializing or relocating
* shards, then this method returns {@code false}.
*/
public boolean isDecisionTaken() {
return allocateDecision.isDecisionTaken() || moveDecision.isDecisionTaken();
}

/**
* Gets the unassigned allocation decision for the shard. If the shard was not in the unassigned state,
* the instance of {@link AllocateUnassignedDecision} that is returned will have {@link AllocateUnassignedDecision#isDecisionTaken()}
* return {@code false}.
*/
public AllocateUnassignedDecision getAllocateDecision() {
return allocateDecision;
}

/**
* Gets the move decision for the shard. If the shard was not in the started state,
* the instance of {@link MoveDecision} that is returned will have {@link MoveDecision#isDecisionTaken()}
* return {@code false}.
*/
public MoveDecision getMoveDecision() {
return moveDecision;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (allocateDecision.isDecisionTaken()) {
allocateDecision.toXContent(builder, params);
}
if (moveDecision.isDecisionTaken()) {
moveDecision.toXContent(builder, params);
}
return builder;
}

}
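Note (illustrative): a sketch of a consumer following the contract documented in the class Javadoc above. The allocator, shard and allocation arguments are assumed to come from the caller, and the helper method name is hypothetical.

    // Returns the explanation for whichever decision was actually taken, or null for
    // initializing/relocating shards where no decision is taken.
    static String explanationOrNull(ShardsAllocator allocator, ShardRouting shard, RoutingAllocation allocation) {
        ShardAllocationDecision decision = allocator.decideShardAllocation(shard, allocation);
        if (decision.getAllocateDecision().isDecisionTaken()) {
            return decision.getAllocateDecision().getExplanation();   // unassigned shard
        } else if (decision.getMoveDecision().isDecisionTaken()) {
            return decision.getMoveDecision().getExplanation();       // started shard
        }
        return null;                                                   // initializing or relocating
    }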
@ -35,6 +35,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
|
||||
@ -46,6 +47,7 @@ import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.gateway.PriorityComparator;
|
||||
|
||||
import java.util.ArrayList;
|
||||
@ -110,12 +112,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
this.threshold = threshold;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
|
||||
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
|
||||
return balancer.weighShard(shard);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void allocate(RoutingAllocation allocation) {
|
||||
if (allocation.routingNodes().size() == 0) {
|
||||
@ -128,16 +124,21 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
balancer.balance();
}

/**
* Returns a decision on rebalancing a single shard to form a more optimal cluster balance. This
* method is not used in itself for cluster rebalancing because all shards from all indices are
* taken into account when making rebalancing decisions. This method is only intended to be used
* from the cluster allocation explain API to explain possible rebalancing decisions for a single
* shard.
*/
public MoveDecision decideRebalance(final ShardRouting shard, final RoutingAllocation allocation) {
assert allocation.debugDecision() : "debugDecision should be set in explain mode";
return new Balancer(logger, allocation, weightFunction, threshold).decideRebalance(shard);
@Override
public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, final RoutingAllocation allocation) {
Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN;
MoveDecision moveDecision = MoveDecision.NOT_TAKEN;
if (shard.unassigned()) {
allocateUnassignedDecision = balancer.decideAllocateUnassigned(shard, Sets.newHashSet());
} else {
moveDecision = balancer.decideMove(shard);
if (moveDecision.isDecisionTaken() && moveDecision.canRemain()) {
MoveDecision rebalanceDecision = balancer.decideRebalance(shard);
moveDecision = rebalanceDecision.withRemainDecision(moveDecision.getCanRemainDecision());
}
}
return new ShardAllocationDecision(allocateUnassignedDecision, moveDecision);
}

/**
@ -337,7 +338,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
*/
|
||||
private MoveDecision decideRebalance(final ShardRouting shard) {
|
||||
if (shard.started() == false) {
|
||||
// cannot rebalance a shard that isn't started
|
||||
// we can only rebalance started shards
|
||||
return MoveDecision.NOT_TAKEN;
|
||||
}
|
||||
|
||||
@ -437,7 +438,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
}
|
||||
|
||||
if (canRebalance.type() != Type.YES || allocation.hasPendingAsyncFetch()) {
|
||||
AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() ? AllocationDecision.FETCH_PENDING :
|
||||
AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() ? AllocationDecision.AWAITING_INFO :
|
||||
AllocationDecision.fromDecisionType(canRebalance.type());
|
||||
return MoveDecision.cannotRebalance(canRebalance, allocationDecision, currentNodeWeightRanking, nodeDecisions);
|
||||
} else {
|
||||
@ -644,7 +645,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
// offloading the shards.
|
||||
for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) {
|
||||
ShardRouting shardRouting = it.next();
|
||||
final MoveDecision moveDecision = makeMoveDecision(shardRouting);
|
||||
final MoveDecision moveDecision = decideMove(shardRouting);
|
||||
if (moveDecision.isDecisionTaken() && moveDecision.forceMove()) {
|
||||
final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
|
||||
final ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId());
|
||||
@ -673,7 +674,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
* 4. If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then
|
||||
* {@link MoveDecision#nodeDecisions} will have a non-null value.
|
||||
*/
|
||||
public MoveDecision makeMoveDecision(final ShardRouting shardRouting) {
|
||||
public MoveDecision decideMove(final ShardRouting shardRouting) {
|
||||
if (shardRouting.started() == false) {
|
||||
// we can only move started shards
|
||||
return MoveDecision.NOT_TAKEN;
|
||||
|
@ -19,11 +19,12 @@

package org.elasticsearch.cluster.routing.allocation.allocator;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;

import java.util.Map;
/**
* <p>
* A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster.
@ -44,13 +45,17 @@ public interface ShardsAllocator {
void allocate(RoutingAllocation allocation);

/**
* Returns a map of node to a float "weight" of where the allocator would like to place the shard.
* Higher weights signify greater desire to place the shard on that node.
* Does not modify the allocation at all.
* Returns the decision for where a shard should reside in the cluster. If the shard is unassigned,
* then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned
* state, then the {@link MoveDecision} will be non-null.
*
* @param allocation current node allocation
* @param shard shard to weigh
* @return map of nodes to float weights
* This method is primarily used by the cluster allocation explain API to provide detailed explanations
* for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method
* may use the results of this method implementation to decide on allocating shards in the routing table
* to the cluster.
*
* If an implementation of this interface does not support explaining decisions for a single shard through
* the cluster explain API, then this method should throw a {@code UnsupportedOperationException}.
*/
Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard);
ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation);
}
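Note (illustrative): per the Javadoc above, a custom allocator that does not support single-shard explanations can opt out as sketched below; the class name is hypothetical.

    public class CustomShardsAllocator implements ShardsAllocator {

        @Override
        public void allocate(RoutingAllocation allocation) {
            // custom placement logic would go here
        }

        @Override
        public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
            // explaining a single shard is optional for custom allocators
            throw new UnsupportedOperationException("cannot explain a single shard allocation");
        }
    }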
@ -23,13 +23,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS;
|
||||
|
||||
/**
|
||||
* A composite {@link AllocationDecider} combining the "decision" of multiple
|
||||
@ -56,7 +55,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
@ -82,7 +82,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
// the assumption is that a decider that returns the static instance Decision#ALWAYS
|
||||
// does not really implements canAllocate
|
||||
ret.add(decision);
|
||||
@ -112,7 +113,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
@ -131,7 +133,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
@ -150,7 +153,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
@ -169,7 +173,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
@ -188,7 +193,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
@ -216,7 +222,8 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
} else if (decision != Decision.ALWAYS
|
||||
&& (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
|
@ -140,6 +140,12 @@ public abstract class Decision implements ToXContent, Writeable {
|
||||
@Nullable
|
||||
public abstract String label();
|
||||
|
||||
/**
|
||||
* Get the explanation for this decision.
|
||||
*/
|
||||
@Nullable
|
||||
public abstract String getExplanation();
|
||||
|
||||
/**
|
||||
* Return the list of all decisions that make up this decision
|
||||
*/
|
||||
@ -200,6 +206,7 @@ public abstract class Decision implements ToXContent, Writeable {
|
||||
/**
|
||||
* Returns the explanation string, fully formatted. Only formats the string once.
|
||||
*/
|
||||
@Override
|
||||
@Nullable
|
||||
public String getExplanation() {
|
||||
if (explanationString == null && explanation != null) {
|
||||
@ -301,6 +308,12 @@ public abstract class Decision implements ToXContent, Writeable {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
@Nullable
|
||||
public String getExplanation() {
|
||||
throw new UnsupportedOperationException("multi-level decisions do not have an explanation");
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Decision> getDecisions() {
|
||||
return Collections.unmodifiableList(this.decisions);
|
||||
|
@ -178,10 +178,12 @@ public class EnableAllocationDecider extends AllocationDecider {
}

private static String setting(Allocation allocation, boolean usedIndexSetting) {
StringBuilder buf = new StringBuilder("[");
StringBuilder buf = new StringBuilder();
if (usedIndexSetting) {
buf.append("index setting [");
buf.append(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey());
} else {
buf.append("cluster setting [");
buf.append(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey());
}
buf.append("=").append(allocation.toString().toLowerCase(Locale.ROOT)).append("]");
@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.FailedShard;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
@ -57,24 +58,6 @@ public class GatewayAllocator extends AbstractComponent {
|
||||
this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the given shard has an async fetch pending
|
||||
*/
|
||||
public boolean hasFetchPending(ShardId shardId, boolean primary) {
|
||||
if (primary) {
|
||||
AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch = asyncFetchStarted.get(shardId);
|
||||
if (fetch != null) {
|
||||
return fetch.getNumberOfInFlightFetches() > 0;
|
||||
}
|
||||
} else {
|
||||
AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetch = asyncFetchStore.get(shardId);
|
||||
if (fetch != null) {
|
||||
return fetch.getNumberOfInFlightFetches() > 0;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public void setReallocation(final ClusterService clusterService, final RoutingService routingService) {
|
||||
this.routingService = routingService;
|
||||
clusterService.addStateApplier(event -> {
|
||||
@ -137,6 +120,18 @@ public class GatewayAllocator extends AbstractComponent {
|
||||
replicaShardAllocator.allocateUnassigned(allocation);
|
||||
}

/**
* Computes and returns the decision for allocating a single unassigned shard. If called on an assigned shard,
* {@link AllocateUnassignedDecision#NOT_TAKEN} is returned.
*/
public AllocateUnassignedDecision decideUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation) {
if (unassignedShard.primary()) {
return primaryShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger);
} else {
return replicaShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger);
}
}

class InternalAsyncFetch<T extends BaseNodeResponse> extends AsyncShardFetch<T> {
|
||||
|
||||
public InternalAsyncFetch(Logger logger, String type, ShardId shardId, Lister<? extends BaseNodesResponse<T>, T> action) {
|
||||
@ -161,11 +156,8 @@ public class GatewayAllocator extends AbstractComponent {
|
||||
|
||||
@Override
|
||||
protected AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation) {
|
||||
AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch = asyncFetchStarted.get(shard.shardId());
|
||||
if (fetch == null) {
|
||||
fetch = new InternalAsyncFetch<>(logger, "shard_started", shard.shardId(), startedAction);
|
||||
asyncFetchStarted.put(shard.shardId(), fetch);
|
||||
}
|
||||
AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch =
|
||||
asyncFetchStarted.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_started", shardId, startedAction));
|
||||
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState =
|
||||
fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));
|
||||
|
||||
@ -187,11 +179,8 @@ public class GatewayAllocator extends AbstractComponent {
|
||||
|
||||
@Override
|
||||
protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation) {
|
||||
AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetch = asyncFetchStore.get(shard.shardId());
|
||||
if (fetch == null) {
|
||||
fetch = new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction);
|
||||
asyncFetchStore.put(shard.shardId(), fetch);
|
||||
}
|
||||
AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetch =
|
||||
asyncFetchStore.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction));
|
||||
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores =
|
||||
fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));
|
||||
if (shardStores.hasData()) {
|
||||
|
@ -183,7 +183,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
// this shard will be picked up when the node joins and we do another allocation reroute
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]",
unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound);
return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, null);
return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, explain ? new ArrayList<>() : null);
}
}

@ -70,10 +70,12 @@ public class RestClusterAllocationExplainAction extends BaseRestHandler {
try {
req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false));
req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false));
final boolean humanReadable = request.paramAsBoolean("human", false);
return channel ->
client.admin().cluster().allocationExplain(req, new RestBuilderListener<ClusterAllocationExplainResponse>(channel) {
@Override
public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws Exception {
builder.humanReadable(humanReadable);
response.getExplanation().toXContent(builder, ToXContent.EMPTY_PARAMS);
return new BytesRestResponse(RestStatus.OK, builder);
}
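Note (illustrative): the same endpoint can also be reached through the Java client; the calls below mirror those used by the removed ClusterAllocationExplainTests, with the index name and shard id as placeholders.

    ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain()
        .setIndex("test").setShard(0).setPrimary(false).get();
    ClusterAllocationExplanation explanation = resp.getExplanation();
    // Over REST, include_yes_decisions, include_disk_info and human are the query
    // parameters handled by the code above.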
@ -0,0 +1,170 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.gateway.TestGatewayAllocator;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction.findShardToExplain;
|
||||
|
||||
/**
|
||||
* Tests for the {@link TransportClusterAllocationExplainAction} class.
|
||||
*/
|
||||
public class ClusterAllocationExplainActionTests extends ESTestCase {
|
||||
|
||||
private static final AllocationDeciders NOOP_DECIDERS = new AllocationDeciders(Settings.EMPTY, Collections.emptyList());
|
||||
|
||||
public void testInitializingOrRelocatingShardExplanation() throws Exception {
|
||||
ShardRoutingState shardRoutingState = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.RELOCATING);
|
||||
ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), shardRoutingState);
|
||||
ShardRouting shard = clusterState.getRoutingTable().index("idx").shard(0).primaryShard();
|
||||
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()),
|
||||
clusterState.getRoutingNodes(), clusterState, null, System.nanoTime(), randomBoolean());
|
||||
ClusterAllocationExplanation cae = TransportClusterAllocationExplainAction.explainShard(shard, allocation, null, randomBoolean(),
|
||||
new TestGatewayAllocator(), new ShardsAllocator() {
|
||||
@Override
|
||||
public void allocate(RoutingAllocation allocation) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
|
||||
if (shard.initializing() || shard.relocating()) {
|
||||
return ShardAllocationDecision.NOT_TAKEN;
|
||||
} else {
|
||||
throw new UnsupportedOperationException("cannot explain");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
assertEquals(shard.currentNodeId(), cae.getCurrentNode().getId());
|
||||
assertFalse(cae.getShardAllocationDecision().isDecisionTaken());
|
||||
assertFalse(cae.getShardAllocationDecision().getAllocateDecision().isDecisionTaken());
|
||||
assertFalse(cae.getShardAllocationDecision().getMoveDecision().isDecisionTaken());
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
cae.toXContent(builder, ToXContent.EMPTY_PARAMS);
|
||||
String explanation;
|
||||
if (shardRoutingState == ShardRoutingState.RELOCATING) {
|
||||
explanation = "the shard is in the process of relocating from node [] to node [], wait until " +
|
||||
"relocation has completed";
|
||||
} else {
|
||||
explanation = "the shard is in the process of initializing on node [], " +
|
||||
"wait until initialization has completed";
|
||||
}
|
||||
assertEquals("{\"index\":\"idx\",\"shard\":0,\"primary\":true,\"current_state\":\"" +
|
||||
shardRoutingState.toString().toLowerCase(Locale.ROOT) + "\",\"current_node\":" +
|
||||
"{\"id\":\"" + cae.getCurrentNode().getId() + "\",\"name\":\"" + cae.getCurrentNode().getName() +
|
||||
"\",\"transport_address\":\"" + cae.getCurrentNode().getAddress() +
|
||||
"\"},\"explanation\":\"" + explanation + "\"}", builder.string());
|
||||
}
|
||||
|
||||
public void testFindAnyUnassignedShardToExplain() {
|
||||
// find unassigned primary
|
||||
ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.UNASSIGNED);
|
||||
ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest();
|
||||
ShardRouting shard = findShardToExplain(request, routingAllocation(clusterState));
|
||||
assertEquals(clusterState.getRoutingTable().index("idx").shard(0).primaryShard(), shard);
|
||||
|
||||
// find unassigned replica
|
||||
clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED, ShardRoutingState.UNASSIGNED);
|
||||
request = new ClusterAllocationExplainRequest();
|
||||
shard = findShardToExplain(request, routingAllocation(clusterState));
|
||||
assertEquals(clusterState.getRoutingTable().index("idx").shard(0).replicaShards().get(0), shard);
|
||||
|
||||
// no unassigned shard to explain
|
||||
final ClusterState allStartedClusterState = ClusterStateCreationUtils.state("idx", randomBoolean(),
|
||||
ShardRoutingState.STARTED, ShardRoutingState.STARTED);
|
||||
final ClusterAllocationExplainRequest anyUnassignedShardsRequest = new ClusterAllocationExplainRequest();
|
||||
expectThrows(IllegalStateException.class, () ->
|
||||
findShardToExplain(anyUnassignedShardsRequest, routingAllocation(allStartedClusterState)));
|
||||
}
|
||||
|
||||
public void testFindPrimaryShardToExplain() {
|
||||
ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), randomFrom(ShardRoutingState.values()));
|
||||
ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("idx", 0, true, null);
|
||||
ShardRouting shard = findShardToExplain(request, routingAllocation(clusterState));
|
||||
assertEquals(clusterState.getRoutingTable().index("idx").shard(0).primaryShard(), shard);
|
||||
}
|
||||
|
||||
public void testFindAnyReplicaToExplain() {
|
||||
// prefer unassigned replicas to started replicas
|
||||
ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED,
|
||||
ShardRoutingState.STARTED, ShardRoutingState.UNASSIGNED);
|
||||
ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("idx", 0, false, null);
|
||||
ShardRouting shard = findShardToExplain(request, routingAllocation(clusterState));
|
||||
assertEquals(clusterState.getRoutingTable().index("idx").shard(0).replicaShards()
|
||||
.stream().filter(ShardRouting::unassigned).findFirst().get(), shard);
|
||||
|
||||
// prefer started replicas to initializing/relocating replicas
|
||||
clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED,
|
||||
randomFrom(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING), ShardRoutingState.STARTED);
|
||||
request = new ClusterAllocationExplainRequest("idx", 0, false, null);
|
||||
shard = findShardToExplain(request, routingAllocation(clusterState));
|
||||
assertEquals(clusterState.getRoutingTable().index("idx").shard(0).replicaShards()
|
||||
.stream().filter(ShardRouting::started).findFirst().get(), shard);
|
||||
}
|
||||
|
||||
public void testFindShardAssignedToNode() {
|
||||
// find shard with given node
|
||||
final boolean primary = randomBoolean();
|
||||
ShardRoutingState[] replicaStates = new ShardRoutingState[0];
|
||||
if (primary == false) {
|
||||
replicaStates = new ShardRoutingState[] { ShardRoutingState.STARTED };
|
||||
}
|
||||
ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED, replicaStates);
|
||||
ShardRouting shardToExplain = primary ? clusterState.getRoutingTable().index("idx").shard(0).primaryShard() :
|
||||
clusterState.getRoutingTable().index("idx").shard(0).replicaShards().get(0);
|
||||
ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("idx", 0, primary, shardToExplain.currentNodeId());
|
||||
RoutingAllocation allocation = routingAllocation(clusterState);
|
||||
ShardRouting foundShard = findShardToExplain(request, allocation);
|
||||
assertEquals(shardToExplain, foundShard);
|
||||
|
||||
// shard is not assigned to given node
|
||||
String explainNode = null;
|
||||
for (RoutingNode routingNode : clusterState.getRoutingNodes()) {
|
||||
if (routingNode.nodeId().equals(shardToExplain.currentNodeId()) == false) {
|
||||
explainNode = routingNode.nodeId();
|
||||
break;
|
||||
}
|
||||
}
|
||||
final ClusterAllocationExplainRequest failingRequest = new ClusterAllocationExplainRequest("idx", 0, primary, explainNode);
|
||||
expectThrows(IllegalStateException.class, () -> findShardToExplain(failingRequest, allocation));
|
||||
}
|
||||
|
||||
private static RoutingAllocation routingAllocation(ClusterState clusterState) {
|
||||
return new RoutingAllocation(NOOP_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, System.nanoTime(), randomBoolean());
|
||||
}
|
||||
}
File diff suppressed because it is too large
@ -26,7 +26,8 @@ public class ClusterAllocationExplainRequestTests extends ESTestCase {

public void testSerialization() throws Exception {
ClusterAllocationExplainRequest request =
new ClusterAllocationExplainRequest(randomAsciiOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), randomBoolean());
new ClusterAllocationExplainRequest(randomAsciiOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), randomBoolean(),
randomBoolean() ? randomAsciiOfLength(5) : null);
request.includeYesDecisions(randomBoolean());
request.includeDiskInfo(randomBoolean());
BytesStreamOutput output = new BytesStreamOutput();
@ -39,6 +40,7 @@ public class ClusterAllocationExplainRequestTests extends ESTestCase {
assertEquals(request.isPrimary(), actual.isPrimary());
assertEquals(request.includeYesDecisions(), actual.includeYesDecisions());
assertEquals(request.includeDiskInfo(), actual.includeDiskInfo());
assertEquals(request.getCurrentNode(), actual.getCurrentNode());
}

}

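Note (illustrative): the new four-argument constructor exercised above pins the explanation to the shard copy on a specific node; the node id here is a placeholder.

    ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("idx", 0, true, "node-1");
    assert "node-1".equals(request.getCurrentNode());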
@@ -1,98 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.test.ESSingleNodeTestCase;


/**
* Tests for the cluster allocation explanation
*/
public final class ClusterAllocationExplainTests extends ESSingleNodeTestCase {

public void testShardExplain() throws Exception {
client().admin().indices().prepareCreate("test")
.setSettings("index.number_of_shards", 1, "index.number_of_replicas", 1).get();
ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain()
.setIndex("test").setShard(0).setPrimary(false).get();

ClusterAllocationExplanation cae = resp.getExplanation();
assertNotNull("should always have an explanation", cae);
assertEquals("test", cae.getShard().getIndexName());
assertEquals(0, cae.getShard().getId());
assertEquals(false, cae.isPrimary());
assertNull(cae.getAssignedNodeId());
assertFalse(cae.isStillFetchingShardData());
assertNotNull(cae.getUnassignedInfo());
NodeExplanation explanation = cae.getNodeExplanations().values().iterator().next();
ClusterAllocationExplanation.FinalDecision fd = explanation.getFinalDecision();
ClusterAllocationExplanation.StoreCopy storeCopy = explanation.getStoreCopy();
Decision d = explanation.getDecision();
assertNotNull("should have a decision", d);
assertEquals(Decision.Type.NO, d.type());
assertEquals(ClusterAllocationExplanation.FinalDecision.NO, fd);
assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy);
assertTrue(d.toString(), d.toString().contains("NO(the shard cannot be allocated to the same node"));
assertTrue(d instanceof Decision.Multi);
Decision.Multi md = (Decision.Multi) d;
Decision ssd = md.getDecisions().get(0);
assertEquals(Decision.Type.NO, ssd.type());
assertTrue(ssd.toString(), ssd.toString().contains("NO(the shard cannot be allocated to the same node"));
Float weight = explanation.getWeight();
assertNotNull("should have a weight", weight);

resp = client().admin().cluster().prepareAllocationExplain().setIndex("test").setShard(0).setPrimary(true).get();

cae = resp.getExplanation();
assertNotNull("should always have an explanation", cae);
assertEquals("test", cae.getShard().getIndexName());
assertEquals(0, cae.getShard().getId());
assertEquals(true, cae.isPrimary());
assertFalse(cae.isStillFetchingShardData());
assertNotNull("shard should have assigned node id", cae.getAssignedNodeId());
assertNull("assigned shard should not have unassigned info", cae.getUnassignedInfo());
explanation = cae.getNodeExplanations().values().iterator().next();
d = explanation.getDecision();
fd = explanation.getFinalDecision();
storeCopy = explanation.getStoreCopy();
assertNotNull("should have a decision", d);
assertEquals(Decision.Type.NO, d.type());
assertEquals(ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED, fd);
assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy);
assertTrue(d.toString(), d.toString().contains(
"NO(the shard cannot be allocated to the node on which it already exists [[test][0]"));
assertTrue(d instanceof Decision.Multi);
md = (Decision.Multi) d;
ssd = md.getDecisions().get(0);
assertEquals(Decision.Type.NO, ssd.type());
assertTrue(ssd.toString(), ssd.toString().contains(
"NO(the shard cannot be allocated to the node on which it already exists [[test][0]"));
weight = explanation.getWeight();
assertNotNull("should have a weight", weight);

resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get();
cae = resp.getExplanation();
assertNotNull("should always have an explanation", cae);
assertEquals("test", cae.getShard().getIndexName());
assertEquals(0, cae.getShard().getId());
assertEquals(false, cae.isPrimary());
}
}
@@ -19,21 +19,19 @@

package org.elasticsearch.action.admin.cluster.allocation;

import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -41,12 +39,6 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
@@ -55,123 +47,6 @@
*/
public final class ClusterAllocationExplanationTests extends ESTestCase {

private Index i = new Index("foo", "uuid");
private ShardRouting primaryShard = ShardRouting.newUnassigned(new ShardId(i, 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
private IndexMetaData indexMetaData = IndexMetaData.builder("foo")
.settings(Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, "uuid"))
.putInSyncAllocationIds(0, Sets.newHashSet("aid1", "aid2"))
.numberOfShards(1)
.numberOfReplicas(1)
.build();
private DiscoveryNode node = new DiscoveryNode("node-0", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
private static Decision.Multi yesDecision = new Decision.Multi();
private static Decision.Multi noDecision = new Decision.Multi();

static {
yesDecision.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
noDecision.add(Decision.single(Decision.Type.NO, "no label", "no thanks"));
}

private void assertExplanations(NodeExplanation ne, String finalExplanation, ClusterAllocationExplanation.FinalDecision finalDecision,
ClusterAllocationExplanation.StoreCopy storeCopy) {
assertEquals(finalExplanation, ne.getFinalExplanation());
assertEquals(finalDecision, ne.getFinalDecision());
assertEquals(storeCopy, ne.getStoreCopy());
}

public void testDecisionAndExplanation() {
Exception e = new IOException("stuff's broke, yo");
Exception corruptE = new CorruptIndexException("stuff's corrupt, yo", "");
Float nodeWeight = randomFloat();
Set<String> activeAllocationIds = new HashSet<>();
activeAllocationIds.add("eggplant");
ShardRouting primaryStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));
ShardRouting replicaStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), false, PeerRecoverySource.INSTANCE,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));

IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, e);
NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node,
yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the copy of the shard cannot be read",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.IO_ERROR);

ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
null, "", activeAllocationIds, false);
assertExplanations(ne, "the shard can be assigned",
ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.NONE);

ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
nodeWeight, null, "", activeAllocationIds, false);
assertExplanations(ne, "there is no copy of the shard available",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);

ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, noDecision, nodeWeight,
null, "", activeAllocationIds, false);
assertExplanations(ne, "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, noDecision, nodeWeight,
storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, corruptE);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
nodeWeight, storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the copy of the shard is corrupt",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.CORRUPT);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the shard can be assigned",
ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.STALE);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
nodeWeight, storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the copy of the shard is stale, allocation ids do not match",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.STALE);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
storeStatus, "node-0", activeAllocationIds, false);
assertExplanations(ne, "the shard is already assigned to this node",
ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the shard can be assigned and the node contains a valid copy of the shard data",
ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
nodeWeight, storeStatus, "", activeAllocationIds, true);
assertExplanations(ne, "the shard's state is still being fetched so it cannot be allocated",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(replicaStartedShard, indexMetaData, node, noDecision,
nodeWeight, storeStatus, "", activeAllocationIds, true);
assertExplanations(ne, "the shard cannot be assigned because allocation deciders return a NO decision",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
}

public void testDecisionEquality() {
Decision.Multi d = new Decision.Multi();
Decision.Multi d2 = new Decision.Multi();
@@ -185,67 +60,53 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
}

public void testExplanationSerialization() throws Exception {
ShardId shard = new ShardId("test", "uuid", 0);
long allocationDelay = randomIntBetween(0, 500);
long remainingDelay = randomIntBetween(0, 500);
Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1);
Float nodeWeight = randomFloat();
Set<String> activeAllocationIds = new HashSet<>();
activeAllocationIds.add("eggplant");

IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
nodeExplanations.put(ne.getNode(), ne);
ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shard, true,
"assignedNode", allocationDelay, remainingDelay, null, false, nodeExplanations, null);
ClusterAllocationExplanation cae = randomClusterAllocationExplanation(randomBoolean());
BytesStreamOutput out = new BytesStreamOutput();
cae.writeTo(out);
StreamInput in = out.bytes().streamInput();
ClusterAllocationExplanation cae2 = new ClusterAllocationExplanation(in);
assertEquals(shard, cae2.getShard());
assertEquals(cae.getShard(), cae2.getShard());
assertEquals(cae.isPrimary(), cae2.isPrimary());
assertTrue(cae2.isPrimary());
assertTrue(cae2.isAssigned());
assertEquals("assignedNode", cae2.getAssignedNodeId());
assertNull(cae2.getUnassignedInfo());
assertEquals(allocationDelay, cae2.getAllocationDelayMillis());
assertEquals(remainingDelay, cae2.getRemainingDelayMillis());
for (Map.Entry<DiscoveryNode, NodeExplanation> entry : cae2.getNodeExplanations().entrySet()) {
NodeExplanation explanation = entry.getValue();
assertNotNull(explanation.getStoreStatus());
assertNotNull(explanation.getDecision());
assertEquals(nodeWeight, explanation.getWeight());
assertEquals(cae.getUnassignedInfo(), cae2.getUnassignedInfo());
assertEquals(cae.getCurrentNode(), cae2.getCurrentNode());
assertEquals(cae.getShardState(), cae2.getShardState());
if (cae.getClusterInfo() == null) {
assertNull(cae2.getClusterInfo());
} else {
assertNotNull(cae2.getClusterInfo());
assertEquals(cae.getClusterInfo().getNodeMostAvailableDiskUsages().size(),
cae2.getClusterInfo().getNodeMostAvailableDiskUsages().size());
}
assertEquals(cae.getShardAllocationDecision().getAllocateDecision(), cae2.getShardAllocationDecision().getAllocateDecision());
assertEquals(cae.getShardAllocationDecision().getMoveDecision(), cae2.getShardAllocationDecision().getMoveDecision());
}

public void testExplanationToXContent() throws Exception {
ShardId shardId = new ShardId("foo", "uuid", 0);
Decision.Multi d = new Decision.Multi();
d.add(Decision.single(Decision.Type.NO, "no label", "because I said no"));
d.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
d.add(Decision.single(Decision.Type.THROTTLE, "throttle label", "wait a sec"));
Float nodeWeight = 1.5f;
Set<String> allocationIds = new HashSet<>();
allocationIds.add("bar");
IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, new ElasticsearchException("stuff's broke, yo"));
NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
d, nodeWeight, storeStatus, "node-0", allocationIds, false);
Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1);
nodeExplanations.put(ne.getNode(), ne);
ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shardId, true,
"assignedNode", 42, 42, null, false, nodeExplanations, null);
ClusterAllocationExplanation cae = randomClusterAllocationExplanation(true);
XContentBuilder builder = XContentFactory.jsonBuilder();
cae.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\"shard\":{\"index\":\"foo\",\"index_uuid\":\"uuid\",\"id\":0,\"primary\":true},\"assigned\":true," +
"\"assigned_node_id\":\"assignedNode\",\"shard_state_fetch_pending\":false,\"nodes\":{\"node-0\":" +
"{\"node_name\":\"\",\"node_attributes\":{},\"store\":{\"shard_copy\":\"IO_ERROR\",\"store_except" +
"ion\":\"ElasticsearchException[stuff's broke, yo]\"},\"final_decision\":\"ALREADY_ASSIGNED\",\"f" +
"inal_explanation\":\"the shard is already assigned to this node\",\"weight\":1.5,\"decisions\":[" +
"{\"decider\":\"no label\",\"decision\":\"NO\",\"explanation\":\"because I said no\"},{\"decider" +
"\":\"yes label\",\"decision\":\"YES\",\"explanation\":\"yes please\"},{\"decider\":\"throttle la" +
"bel\",\"decision\":\"THROTTLE\",\"explanation\":\"wait a sec\"}]}}}",
builder.string());
assertEquals("{\"index\":\"idx\",\"shard\":0,\"primary\":true,\"current_state\":\"started\",\"current_node\":" +
"{\"id\":\"node-0\",\"name\":\"\",\"transport_address\":\"" + cae.getCurrentNode().getAddress() +
"\",\"weight_ranking\":3},\"can_remain_on_current_node\":\"yes\",\"can_rebalance_cluster\":\"yes\"," +
"\"can_rebalance_to_other_node\":\"no\",\"rebalance_explanation\":\"cannot rebalance as no target node exists " +
"that can both allocate this shard and improve the cluster balance\"}", builder.string());
}

private static ClusterAllocationExplanation randomClusterAllocationExplanation(boolean assignedShard) {
ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(new Index("idx", "123"), 0),
assignedShard ? "node-0" : null, true, assignedShard ? ShardRoutingState.STARTED : ShardRoutingState.UNASSIGNED);
DiscoveryNode node = assignedShard ? new DiscoveryNode("node-0", buildNewFakeTransportAddress(), emptyMap(), emptySet(),
Version.CURRENT) : null;
ShardAllocationDecision shardAllocationDecision;
if (assignedShard) {
MoveDecision moveDecision = MoveDecision.cannotRebalance(Decision.YES, AllocationDecision.NO, 3, null)
.withRemainDecision(Decision.YES);
shardAllocationDecision = new ShardAllocationDecision(AllocateUnassignedDecision.NOT_TAKEN, moveDecision);
} else {
AllocateUnassignedDecision allocateDecision = AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, null);
shardAllocationDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
}
return new ClusterAllocationExplanation(shardRouting, node, null, null, shardAllocationDecision);
}
}
@@ -19,9 +19,9 @@

package org.elasticsearch.cluster;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
@@ -51,7 +51,6 @@ import org.elasticsearch.plugins.ClusterPlugin;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -71,10 +70,9 @@ public class ClusterModuleTests extends ModuleTestCase {
public void allocate(RoutingAllocation allocation) {
// noop
}

@Override
public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
return new HashMap<>();
public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
throw new UnsupportedOperationException("explain API not supported on FakeShardsAllocator");
}
}
@@ -71,7 +71,7 @@ public class AllocateUnassignedDecisionTests extends ESTestCase {
assertThat(noDecision.getExplanation(), startsWith("cannot allocate because the cluster is still waiting"));
} else {
assertThat(noDecision.getExplanation(),
startsWith("cannot allocate because a previous copy of the shard existed"));
startsWith("cannot allocate because a previous copy of the primary shard existed"));
}
assertNull(noDecision.getNodeDecisions());
assertNull(noDecision.getTargetNode());
@@ -107,7 +107,7 @@ public class AllocateUnassignedDecisionTests extends ESTestCase {
nodeDecisions.add(new NodeAllocationResult(node2, Decision.THROTTLE, 2));
AllocateUnassignedDecision throttleDecision = AllocateUnassignedDecision.throttle(nodeDecisions);
assertTrue(throttleDecision.isDecisionTaken());
assertEquals(AllocationDecision.THROTTLE, throttleDecision.getAllocationDecision());
assertEquals(AllocationDecision.THROTTLED, throttleDecision.getAllocationDecision());
assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus());
assertThat(throttleDecision.getExplanation(), startsWith("allocation temporarily throttled"));
assertEquals(nodeDecisions.stream().sorted().collect(Collectors.toList()), throttleDecision.getNodeDecisions());
@@ -48,21 +48,21 @@
*/
public void testValuesOrder() {
assertEquals(0, AllocationDecision.YES.ordinal());
assertEquals(1, AllocationDecision.THROTTLE.ordinal());
assertEquals(1, AllocationDecision.THROTTLED.ordinal());
assertEquals(2, AllocationDecision.NO.ordinal());
assertEquals(3, AllocationDecision.WORSE_BALANCE.ordinal());
assertEquals(4, AllocationDecision.FETCH_PENDING.ordinal());
assertEquals(5, AllocationDecision.DELAYED_ALLOCATION.ordinal());
assertEquals(4, AllocationDecision.AWAITING_INFO.ordinal());
assertEquals(5, AllocationDecision.ALLOCATION_DELAYED.ordinal());
assertEquals(6, AllocationDecision.NO_VALID_SHARD_COPY.ordinal());
assertEquals(7, AllocationDecision.NO_ATTEMPT.ordinal());
AllocationDecision[] decisions = AllocationDecision.values();
Arrays.sort(decisions);
assertEquals(AllocationDecision.YES, decisions[0]);
assertEquals(AllocationDecision.THROTTLE, decisions[1]);
assertEquals(AllocationDecision.THROTTLED, decisions[1]);
assertEquals(AllocationDecision.NO, decisions[2]);
assertEquals(AllocationDecision.WORSE_BALANCE, decisions[3]);
assertEquals(AllocationDecision.FETCH_PENDING, decisions[4]);
assertEquals(AllocationDecision.DELAYED_ALLOCATION, decisions[5]);
assertEquals(AllocationDecision.AWAITING_INFO, decisions[4]);
assertEquals(AllocationDecision.ALLOCATION_DELAYED, decisions[5]);
assertEquals(AllocationDecision.NO_VALID_SHARD_COPY, decisions[6]);
assertEquals(AllocationDecision.NO_ATTEMPT, decisions[7]);
}
@@ -74,7 +74,7 @@ public class AllocationDecisionTests extends ESTestCase {
Type type = randomFrom(Type.values());
AllocationDecision allocationDecision = AllocationDecision.fromDecisionType(type);
AllocationDecision expected = type == Type.NO ? AllocationDecision.NO :
type == Type.THROTTLE ? AllocationDecision.THROTTLE : AllocationDecision.YES;
type == Type.THROTTLE ? AllocationDecision.THROTTLED : AllocationDecision.YES;
assertEquals(expected, allocationDecision);
}

@@ -88,11 +88,11 @@
if (allocationStatus == null) {
expected = AllocationDecision.YES;
} else if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) {
expected = AllocationDecision.THROTTLE;
expected = AllocationDecision.THROTTLED;
} else if (allocationStatus == AllocationStatus.FETCHING_SHARD_DATA) {
expected = AllocationDecision.FETCH_PENDING;
expected = AllocationDecision.AWAITING_INFO;
} else if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) {
expected = AllocationDecision.DELAYED_ALLOCATION;
expected = AllocationDecision.ALLOCATION_DELAYED;
} else if (allocationStatus == AllocationStatus.NO_VALID_SHARD_COPY) {
expected = AllocationDecision.NO_VALID_SHARD_COPY;
} else if (allocationStatus == AllocationStatus.NO_ATTEMPT) {
@@ -45,9 +45,7 @@ import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.test.gateway.TestGatewayAllocator;
import org.hamcrest.Matchers;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -257,10 +255,6 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random()),
new TestGatewayAllocator(), new ShardsAllocator() {

public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
return new HashMap<DiscoveryNode, Float>();
}
/*
* // this allocator tries to rebuild this scenario where a rebalance is
* // triggered solely by the primary overload on node [1] where a shard
@@ -327,6 +321,11 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {

}
}

@Override
public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
throw new UnsupportedOperationException("explain not supported");
}
}, EmptyClusterInfoService.INSTANCE);
MetaData.Builder metaDataBuilder = MetaData.builder();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
@@ -60,8 +60,8 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(),
randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.UNASSIGNED, ShardRoutingState.RELOCATING));
ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
MoveDecision rebalanceDecision = allocator.decideRebalance(shard, newRoutingAllocation(
new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState));
MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, newRoutingAllocation(
new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState)).getMoveDecision();
assertSame(MoveDecision.NOT_TAKEN, rebalanceDecision);
}

@@ -72,9 +72,9 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
RoutingAllocation routingAllocation = newRoutingAllocation(
new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState);
routingAllocation.setHasPendingAsyncFetch();
MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
assertNotNull(rebalanceDecision.getCanRebalanceDecision());
assertEquals(AllocationDecision.FETCH_PENDING, rebalanceDecision.getAllocationDecision());
MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
assertNotNull(rebalanceDecision.getClusterRebalanceDecision());
assertEquals(AllocationDecision.AWAITING_INFO, rebalanceDecision.getAllocationDecision());
assertThat(rebalanceDecision.getExplanation(),
startsWith("cannot rebalance as information about existing copies of this shard in the cluster is still being gathered"));
assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
@@ -96,15 +96,15 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
RoutingAllocation routingAllocation = newRoutingAllocation(
new AllocationDeciders(Settings.EMPTY, Collections.singleton(noRebalanceDecider)), clusterState);
MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
assertEquals(canRebalanceDecision.type(), rebalanceDecision.getCanRebalanceDecision().type());
MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
assertEquals(canRebalanceDecision.type(), rebalanceDecision.getClusterRebalanceDecision().type());
assertEquals(AllocationDecision.fromDecisionType(canRebalanceDecision.type()), rebalanceDecision.getAllocationDecision());
assertThat(rebalanceDecision.getExplanation(), containsString(canRebalanceDecision.type() == Type.THROTTLE ?
"rebalancing is throttled" : "rebalancing is not allowed"));
assertNotNull(rebalanceDecision.getNodeDecisions());
assertNull(rebalanceDecision.getTargetNode());
assertEquals(1, rebalanceDecision.getCanRebalanceDecision().getDecisions().size());
for (Decision subDecision : rebalanceDecision.getCanRebalanceDecision().getDecisions()) {
assertEquals(1, rebalanceDecision.getClusterRebalanceDecision().getDecisions().size());
for (Decision subDecision : rebalanceDecision.getClusterRebalanceDecision().getDecisions()) {
assertEquals("foobar", ((Decision.Single) subDecision).getExplanation());
}

@@ -121,7 +121,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
Tuple<ClusterState, MoveDecision> rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, true);
ClusterState clusterState = rebalance.v1();
MoveDecision rebalanceDecision = rebalance.v2();
assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
assertEquals(Type.YES, rebalanceDecision.getClusterRebalanceDecision().type());
assertNotNull(rebalanceDecision.getExplanation());
assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
}
@@ -136,7 +136,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
Tuple<ClusterState, MoveDecision> rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, false);
ClusterState clusterState = rebalance.v1();
MoveDecision rebalanceDecision = rebalance.v2();
assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
assertEquals(Type.YES, rebalanceDecision.getClusterRebalanceDecision().type());
assertEquals(AllocationDecision.NO, rebalanceDecision.getAllocationDecision());
assertThat(rebalanceDecision.getExplanation(), startsWith(
"cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance"));
@@ -161,7 +161,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
Tuple<ClusterState, MoveDecision> rebalance = setupStateAndRebalance(canAllocateDecider, balancerSettings, false);
ClusterState clusterState = rebalance.v1();
MoveDecision rebalanceDecision = rebalance.v2();
assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
assertEquals(Type.YES, rebalanceDecision.getClusterRebalanceDecision().type());
assertEquals(AllocationDecision.NO, rebalanceDecision.getAllocationDecision());
assertNotNull(rebalanceDecision.getExplanation());
assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
@@ -232,7 +232,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
routingAllocation = newRoutingAllocation(new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
routingAllocation.debugDecision(true);
ShardRouting shard = clusterState.getRoutingNodes().activePrimary(shardToRebalance.shardId());
MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
assertEquals(shardToRebalance.relocatingNodeId(), rebalanceDecision.getTargetNode().getId());
// make sure all excluded nodes returned a NO decision
for (NodeAllocationResult nodeResult : rebalanceDecision.getNodeDecisions()) {
@@ -325,7 +325,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings);
RoutingAllocation routingAllocation = newRoutingAllocation(
new AllocationDeciders(Settings.EMPTY, Arrays.asList(allocationDecider, rebalanceDecider)), clusterState);
return allocator.decideRebalance(shardRouting, routingAllocation);
return allocator.decideShardAllocation(shardRouting, routingAllocation).getMoveDecision();
}

private ClusterState addNodesToClusterState(ClusterState clusterState, int numNodesToAdd) {
@@ -357,7 +357,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
RoutingAllocation routingAllocation = newRoutingAllocation(
new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();

if (rebalanceExpected == false) {
assertAssignedNodeRemainsSame(allocator, routingAllocation, shard);
@@ -72,7 +72,6 @@ public class MoveDecisionTests extends ESTestCase {
assertFalse(stay.forceMove());
assertTrue(stay.isDecisionTaken());
assertNull(stay.getNodeDecisions());
assertNotNull(stay.getExplanation());
assertEquals(AllocationDecision.NO_ATTEMPT, stay.getAllocationDecision());

stay = MoveDecision.stay(Decision.YES);
@@ -80,7 +79,6 @@
assertFalse(stay.forceMove());
assertTrue(stay.isDecisionTaken());
assertNull(stay.getNodeDecisions());
assertEquals("shard can remain on its current node", stay.getExplanation());
assertEquals(AllocationDecision.NO_ATTEMPT, stay.getAllocationDecision());
}
@@ -1,20 +1,24 @@
---
"cluster shard allocation explanation test":
- skip:
version: " - 5.1.99"
reason: allocation explain api format is different in versions < 5.2.0

- do:
# there aren't any unassigned shards to explain
catch: /unable to find any shards to explain/
catch: /illegal_state_exception/
cluster.allocation_explain: {}

- do:
indices.create:
index: test

- match: { acknowledged: true }

- do:
cluster.state:
metric: [ master_node ]

- set: {master_node: node_id}

# This relies on there only being a single node in the test cluster, which
# is currently true, but if this changes in the future this test will need
# to be changed
@@ -22,24 +26,22 @@
cluster.allocation_explain:
body: { "index": "test", "shard": 0, "primary": true }

- match: { assigned: true }
# - match: { assigned_node_id: $node_id }
- is_true: assigned_node_id
- match: { shard.index: "test" }
- match: { shard.id: 0 }
- match: { shard.primary: true }
# unfortunately can't test these because they break with multi-node backwords
# compat REST tests
# - is_true: nodes.$node_id.node_name
# - match: { nodes.$node_id.node_attributes.testattr: "test" }
# - match: { nodes.$node_id.node_attributes.portsfile: "true" }
# - match: { nodes.$node_id.final_decision: "CURRENTLY_ASSIGNED" }
# - match: { nodes.$node_id.weight: 0.0 }
# - match: { nodes.$node_id.decisions.0.decider: "same_shard" }
# - match: { nodes.$node_id.decisions.0.decision: "NO" }
- match: { current_state: "started" }
- is_true: current_node.id
- match: { index: "test" }
- match: { shard: 0 }
- match: { primary: true }
- match: { can_remain_on_current_node: "yes" }
- match: { can_rebalance_cluster: "no" }
- match: { can_rebalance_to_other_node: "no" }
- match: { rebalance_explanation: "rebalancing is not allowed" }

---
"cluster shard allocation explanation test with empty request":
- skip:
version: " - 5.1.99"
reason: allocation explain api format is different in versions < 5.2.0

- do:
indices.create:
index: test
@@ -49,22 +51,16 @@
cluster.state:
metric: [ master_node ]

- set: {master_node: node_id}

- do:
cluster.allocation_explain:
include_disk_info: true

- match: { assigned: false }
- match: { current_state: "unassigned" }
- match: { unassigned_info.reason: "INDEX_CREATED" }
- is_true: unassigned_info.at
- match: { shard.index: "test" }
- match: { shard.id: 0 }
- match: { shard.primary: false }
- match: { index: "test" }
- match: { shard: 0 }
- match: { primary: false }
- is_true: cluster_info
# - is_true: nodes.$node_id.node_name
# - match: { nodes.$node_id.node_attributes.testattr: "test" }
# - match: { nodes.$node_id.node_attributes.portsfile: "true" }
# - match: { nodes.$node_id.final_decision: "NO" }
# - match: { nodes.$node_id.decisions.0.decider: "same_shard" }
# - match: { nodes.$node_id.decisions.0.decision: "NO" }
- match: { can_allocate: "no" }
- match: { allocate_explanation : "cannot allocate because allocation is not permitted to any of the nodes" }
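For plugin authors whose ShardsAllocator implementations are affected by the interface change visible in the ClusterModuleTests and BalanceConfigurationTests hunks above (weighShard removed, decideShardAllocation added), the following is a minimal sketch of the new override. The class name CustomShardsAllocator is hypothetical and not part of this commit; the body uses only the constructor and constants that appear in the diff above, and an allocator that does not support the explain API can instead throw UnsupportedOperationException, exactly as the test allocators above do.

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;

// Hypothetical custom allocator sketch; not part of the commit above.
public class CustomShardsAllocator implements ShardsAllocator {

    @Override
    public void allocate(RoutingAllocation allocation) {
        // Custom shard placement logic would go here (omitted in this sketch).
    }

    @Override
    public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
        // A conservative implementation built only from constants shown in this diff:
        // report that neither an allocation nor a move decision was taken for the shard.
        // Allocators that do not support the explain API may instead throw
        // UnsupportedOperationException, as FakeShardsAllocator does above.
        return new ShardAllocationDecision(AllocateUnassignedDecision.NOT_TAKEN, MoveDecision.NOT_TAKEN);
    }
}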