Adds setting level to allocation decider explanations (#22268)
The allocation decider explanation messages were improved in #21771 to include the specific Elasticsearch setting that contributed to each decider's decision. This commit improves the explanation output further by stating whether the setting is an index-level or a cluster-level setting. This will further help the user understand and locate the setting that is causing shards to remain unassigned or to remain on their current node.
parent a04dcfb95b
commit ad4405f244
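To make the change concrete before the diff: the sketch below is illustrative only, not the actual Decision/AllocationDecider plumbing. It assumes the %s/%d templates shown in the diff are expanded String.format-style, as their java.util.Formatter placeholders suggest, and contrasts one explanation message before and after this commit.

import java.util.Locale;

// Illustrative sketch only -- not the actual Decision/AllocationDecider plumbing.
// Assumption: the %s/%d templates in the diff are expanded String.format-style.
public class SettingLevelMessageSketch {
    public static void main(String[] args) {
        String key = "cluster.routing.allocation.cluster_concurrent_rebalance";
        // Before this commit: the bare key gives no hint whether it is an
        // index-level or a cluster-level setting.
        System.out.println(String.format(Locale.ROOT,
            "reached the limit of concurrently rebalancing shards [%d], [%s=%d]", 2, key, 2));
        // After this commit: the message states the setting's level explicitly.
        System.out.println(String.format(Locale.ROOT,
            "reached the limit of concurrently rebalancing shards [%d], cluster setting [%s=%d]", 2, key, 2));
    }
}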
@@ -125,7 +125,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
     private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation, boolean moveToNode) {
         if (awarenessAttributes.length == 0) {
             return allocation.decision(Decision.YES, NAME,
-                "allocation awareness is not enabled, set [%s] to enable it",
+                "allocation awareness is not enabled, set cluster setting [%s] to enable it",
                 CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey());
         }
 
@@ -135,7 +135,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
             // the node the shard exists on must be associated with an awareness attribute
             if (!node.node().getAttributes().containsKey(awarenessAttribute)) {
                 return allocation.decision(Decision.NO, NAME,
-                    "node does not contain the awareness attribute [%s]; required attributes [%s=%s]",
+                    "node does not contain the awareness attribute [%s]; required attributes cluster setting [%s=%s]",
                     awarenessAttribute, CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(),
                     allocation.debugDecision() ? Strings.arrayToCommaDelimitedString(awarenessAttributes) : null);
             }
@@ -120,13 +120,13 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             // check if there are unassigned primaries.
             if ( allocation.routingNodes().hasUnassignedPrimaries() ) {
                 return allocation.decision(Decision.NO, NAME,
-                    "the cluster has unassigned primary shards and [%s] is set to [%s]",
+                    "the cluster has unassigned primary shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
             // check if there are initializing primaries that don't have a relocatingNodeId entry.
             if ( allocation.routingNodes().hasInactivePrimaries() ) {
                 return allocation.decision(Decision.NO, NAME,
-                    "the cluster has inactive primary shards and [%s] is set to [%s]",
+                    "the cluster has inactive primary shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
 
@@ -136,14 +136,14 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             // check if there are unassigned shards.
             if (allocation.routingNodes().hasUnassignedShards() ) {
                 return allocation.decision(Decision.NO, NAME,
-                    "the cluster has unassigned shards and [%s] is set to [%s]",
+                    "the cluster has unassigned shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
             // in case all indices are assigned, are there initializing shards which
             // are not relocating?
             if ( allocation.routingNodes().hasInactiveShards() ) {
                 return allocation.decision(Decision.NO, NAME,
-                    "the cluster has inactive shards and [%s] is set to [%s]",
+                    "the cluster has inactive shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
         }
@@ -67,7 +67,7 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
         int relocatingShards = allocation.routingNodes().getRelocatingShardCount();
         if (relocatingShards >= clusterConcurrentRebalance) {
             return allocation.decision(Decision.THROTTLE, NAME,
-                "reached the limit of concurrently rebalancing shards [%d], [%s=%d]",
+                "reached the limit of concurrently rebalancing shards [%d], cluster setting [%s=%d]",
                 relocatingShards,
                 CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(),
                 clusterConcurrentRebalance);
@@ -138,7 +138,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
-                "the node is above the low watermark [%s=%s], having less than the minimum required [%s] free space, actual free: [%s]",
+                "the node is above the low watermark cluster setting [%s=%s], having less than the minimum required [%s] free " +
+                "space, actual free: [%s]",
                 CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getLowWatermarkRaw(),
                 diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytes));
@@ -162,8 +163,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
-                "the node is above the high watermark [%s=%s], having less than the minimum required [%s] free space, " +
-                "actual free: [%s]",
+                "the node is above the high watermark cluster setting [%s=%s], having less than the minimum required [%s] free " +
+                "space, actual free: [%s]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
                 diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
@@ -180,8 +181,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
-                "the node is above the low watermark [%s=%s], using more disk space than the maximum allowed [%s%%], " +
-                "actual free: [%s%%]",
+                "the node is above the low watermark cluster setting [%s=%s], using more disk space than the maximum allowed " +
+                "[%s%%], actual free: [%s%%]",
                 CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getLowWatermarkRaw(), usedDiskThresholdLow, freeDiskPercentage);
         } else if (freeDiskPercentage > diskThresholdSettings.getFreeDiskThresholdHigh()) {
@@ -206,8 +207,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     Strings.format1Decimals(freeDiskPercentage, "%"), node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
-                "the node is above the high watermark [%s=%s], using more disk space than the maximum allowed [%s%%], " +
-                "actual free: [%s%%]",
+                "the node is above the high watermark cluster setting [%s=%s], using more disk space than the maximum allowed " +
+                "[%s%%], actual free: [%s%%]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeDiskPercentage);
         }
@@ -222,7 +223,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                 "{} free bytes threshold ({} bytes free), preventing allocation",
                 node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesAfterShard);
             return allocation.decision(Decision.NO, NAME,
-                "allocating the shard to this node will bring the node above the high watermark [%s=%s] " +
+                "allocating the shard to this node will bring the node above the high watermark cluster setting [%s=%s] " +
                 "and cause it to have less than the minimum required [%s] of free space (free bytes after shard added: [%s])",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
@@ -234,7 +235,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                 node.nodeId(), Strings.format1Decimals(diskThresholdSettings.getFreeDiskThresholdHigh(), "%"),
                 Strings.format1Decimals(freeSpaceAfterShard, "%"));
             return allocation.decision(Decision.NO, NAME,
-                "allocating the shard to this node will bring the node above the high watermark [%s=%s] " +
+                "allocating the shard to this node will bring the node above the high watermark cluster setting [%s=%s] " +
                 "and cause it to use more disk space than the maximum allowed [%s%%] (free space after shard added: [%s%%])",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeSpaceAfterShard);
@@ -279,7 +280,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
-                "the shard cannot remain on this node because it is above the high watermark [%s=%s] " +
+                "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] " +
                 "and there is less than the required [%s] free space on node, actual free: [%s]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
@@ -291,7 +292,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
-                "the shard cannot remain on this node because it is above the high watermark [%s=%s] " +
+                "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] " +
                 "and there is less than the required [%s%%] free disk on node, actual free: [%s%%]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
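As a quick check that the reworded low-watermark template still lines up with its format arguments, here is a hedged sketch of how the new message expands; the expected text matches the DiskThresholdDeciderTests assertion further down in this diff. Only the format string comes from the diff; the wrapper class is illustrative.

import java.util.Locale;

// Sketch only -- not the real DiskThresholdDecider code path. It expands the new
// low-watermark template with the same values the test below asserts against.
public class WatermarkMessageSketch {
    public static void main(String[] args) {
        String msg = String.format(Locale.ROOT,
            "the node is above the low watermark cluster setting [%s=%s], using more disk space than the maximum allowed " +
            "[%s%%], actual free: [%s%%]",
            "cluster.routing.allocation.disk.watermark.low", "0.7", "70.0", "26.0");
        // -> the node is above the low watermark cluster setting
        //    [cluster.routing.allocation.disk.watermark.low=0.7], using more disk space
        //    than the maximum allowed [70.0%], actual free: [26.0%]
        System.out.println(msg);
    }
}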
@@ -189,10 +189,12 @@ public class EnableAllocationDecider extends AllocationDecider {
     }
 
     private static String setting(Rebalance rebalance, boolean usedIndexSetting) {
-        StringBuilder buf = new StringBuilder("[");
+        StringBuilder buf = new StringBuilder();
         if (usedIndexSetting) {
+            buf.append("index setting [");
             buf.append(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey());
         } else {
+            buf.append("cluster setting [");
             buf.append(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey());
         }
         buf.append("=").append(rebalance.toString().toLowerCase(Locale.ROOT)).append("]");
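The updated setting(...) helper above is small enough to exercise standalone. The following self-contained sketch mirrors its logic, with the Rebalance enum and the two setting keys hardcoded for illustration (in Elasticsearch they come from INDEX_ROUTING_REBALANCE_ENABLE_SETTING and CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING):

import java.util.Locale;

// Self-contained sketch of the setting(...) helper above. The enum constants and
// the two keys are hardcoded stand-ins for the Elasticsearch setting constants.
public class EnableSettingSketch {
    enum Rebalance { ALL, PRIMARIES, REPLICAS, NONE }

    static String setting(Rebalance rebalance, boolean usedIndexSetting) {
        StringBuilder buf = new StringBuilder();
        if (usedIndexSetting) {
            buf.append("index setting [");
            buf.append("index.routing.rebalance.enable");
        } else {
            buf.append("cluster setting [");
            buf.append("cluster.routing.rebalance.enable");
        }
        buf.append("=").append(rebalance.toString().toLowerCase(Locale.ROOT)).append("]");
        return buf.toString();
    }

    public static void main(String[] args) {
        System.out.println(setting(Rebalance.NONE, true));  // index setting [index.routing.rebalance.enable=none]
        System.out.println(setting(Rebalance.ALL, false));  // cluster setting [cluster.routing.rebalance.enable=all]
    }
}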
@@ -141,19 +141,19 @@ public class FilterAllocationDecider extends AllocationDecider {
     private Decision shouldIndexFilter(IndexMetaData indexMd, RoutingNode node, RoutingAllocation allocation) {
         if (indexMd.requireFilters() != null) {
             if (!indexMd.requireFilters().match(node.node())) {
-                return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
+                return allocation.decision(Decision.NO, NAME, "node does not match index setting [%s] filters [%s]",
                     IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX, indexMd.requireFilters());
             }
         }
         if (indexMd.includeFilters() != null) {
             if (!indexMd.includeFilters().match(node.node())) {
-                return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
+                return allocation.decision(Decision.NO, NAME, "node does not match index setting [%s] filters [%s]",
                     IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_PREFIX, indexMd.includeFilters());
             }
         }
         if (indexMd.excludeFilters() != null) {
             if (indexMd.excludeFilters().match(node.node())) {
-                return allocation.decision(Decision.NO, NAME, "node matches [%s] filters [%s]",
+                return allocation.decision(Decision.NO, NAME, "node matches index setting [%s] filters [%s]",
                     IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey(), indexMd.excludeFilters());
             }
         }
@@ -163,19 +163,19 @@ public class FilterAllocationDecider extends AllocationDecider {
     private Decision shouldClusterFilter(RoutingNode node, RoutingAllocation allocation) {
         if (clusterRequireFilters != null) {
             if (!clusterRequireFilters.match(node.node())) {
-                return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
+                return allocation.decision(Decision.NO, NAME, "node does not match cluster setting [%s] filters [%s]",
                     CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX, clusterRequireFilters);
             }
         }
         if (clusterIncludeFilters != null) {
             if (!clusterIncludeFilters.match(node.node())) {
-                return allocation.decision(Decision.NO, NAME, "node does not [%s] filters [%s]",
+                return allocation.decision(Decision.NO, NAME, "node does not cluster setting [%s] filters [%s]",
                     CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX, clusterIncludeFilters);
             }
         }
         if (clusterExcludeFilters != null) {
             if (clusterExcludeFilters.match(node.node())) {
-                return allocation.decision(Decision.NO, NAME, "node matches [%s] filters [%s]",
+                return allocation.decision(Decision.NO, NAME, "node matches cluster setting [%s] filters [%s]",
                     CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX, clusterExcludeFilters);
             }
         }
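Unlike the single-key settings above, the filter messages print a settings group prefix (e.g. index.routing.allocation.include.<attribute>) rather than one key. A minimal sketch of the resulting text, mirroring the ClusterAllocationExplainIT assertion near the end of this diff; the wrapper class is illustrative only:

import java.util.Locale;

// Sketch of a filter-decider message after this change. The prefix and filter
// values are taken from the integration-test assertion further down.
public class FilterMessageSketch {
    public static void main(String[] args) {
        System.out.println(String.format(Locale.ROOT,
            "node does not match index setting [%s] filters [%s]",
            "index.routing.allocation.include", "foo:\"bar\""));
        // -> node does not match index setting [index.routing.allocation.include] filters [foo:"bar"]
    }
}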
@@ -88,7 +88,8 @@ public class SameShardAllocationDecider extends AllocationDecider {
             String host = checkNodeOnSameHostAddress ? node.node().getHostAddress() : node.node().getHostName();
             return allocation.decision(Decision.NO, NAME,
                 "the shard cannot be allocated on host %s [%s], where it already exists on node [%s]; " +
-                "set [%s] to false to allow multiple nodes on the same host to hold the same shard copies",
+                "set cluster setting [%s] to false to allow multiple nodes on the same host to hold the same " +
+                "shard copies",
                 hostType, host, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey());
         }
     }
@@ -122,12 +122,12 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 
         if (clusterShardLimit > 0 && decider.test(nodeShardCount, clusterShardLimit)) {
             return allocation.decision(Decision.NO, NAME,
-                "too many shards [%d] allocated to this node, [%s=%d]",
+                "too many shards [%d] allocated to this node, cluster setting [%s=%d]",
                 nodeShardCount, CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), clusterShardLimit);
         }
         if (indexShardLimit > 0 && decider.test(indexShardCount, indexShardLimit)) {
             return allocation.decision(Decision.NO, NAME,
-                "too many shards [%d] allocated to this node for index [%s], [%s=%d]",
+                "too many shards [%d] allocated to this node for index [%s], index setting [%s=%d]",
                 indexShardCount, shardRouting.getIndexName(), INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), indexShardLimit);
         }
         return allocation.decision(Decision.YES, NAME,
@@ -157,7 +157,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
         }
         if (clusterShardLimit >= 0 && nodeShardCount >= clusterShardLimit) {
             return allocation.decision(Decision.NO, NAME,
-                "too many shards [%d] allocated to this node, [%s=%d]",
+                "too many shards [%d] allocated to this node, cluster setting [%s=%d]",
                 nodeShardCount, CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), clusterShardLimit);
         }
         return allocation.decision(Decision.YES, NAME,
@@ -126,7 +126,8 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
             }
             if (primariesInRecovery >= primariesInitialRecoveries) {
                 // TODO: Should index creation not be throttled for primary shards?
-                return allocation.decision(THROTTLE, NAME, "reached the limit of ongoing initial primary recoveries [%d], [%s=%d]",
+                return allocation.decision(THROTTLE, NAME,
+                    "reached the limit of ongoing initial primary recoveries [%d], cluster setting [%s=%d]",
                     primariesInRecovery, CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(),
                     primariesInitialRecoveries);
             } else {
@@ -140,7 +141,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
             int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
             if (currentInRecoveries >= concurrentIncomingRecoveries) {
                 return allocation.decision(THROTTLE, NAME,
-                    "reached the limit of incoming shard recoveries [%d], [%s=%d] (can also be set via [%s])",
+                    "reached the limit of incoming shard recoveries [%d], cluster setting [%s=%d] (can also be set via [%s])",
                     currentInRecoveries, CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(),
                     concurrentIncomingRecoveries,
                     CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey());
@@ -154,7 +155,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
             if (primaryNodeOutRecoveries >= concurrentOutgoingRecoveries) {
                 return allocation.decision(THROTTLE, NAME,
                     "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " +
-                    "[%s=%d] (can also be set via [%s])",
+                    "cluster setting [%s=%d] (can also be set via [%s])",
                     primaryNodeOutRecoveries, node.nodeId(),
                     CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(),
                     concurrentOutgoingRecoveries,
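The recovery-throttling messages additionally name the umbrella setting that can control the same limit. A sketch of one expanded message follows; the two keys are what these setting constants resolve to in Elasticsearch to the best of my reading, so treat them as illustrative:

import java.util.Locale;

// Sketch only: expands the new incoming-recoveries throttle template. The two
// setting keys are assumed values for the constants named in the diff.
public class RecoveryThrottleMessageSketch {
    public static void main(String[] args) {
        System.out.println(String.format(Locale.ROOT,
            "reached the limit of incoming shard recoveries [%d], cluster setting [%s=%d] (can also be set via [%s])",
            2, "cluster.routing.allocation.node_concurrent_incoming_recoveries", 2,
            "cluster.routing.allocation.node_concurrent_recoveries"));
    }
}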
@@ -135,13 +135,15 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
 
             assertEquals(d.type(), Decision.Type.NO);
             if (noAttrNode.equals(nodeName)) {
-                assertThat(d.toString(), containsString("node does not match [index.routing.allocation.include] filters [foo:\"bar\"]"));
+                assertThat(d.toString(), containsString("node does not match index setting [index.routing.allocation.include] " +
+                    "filters [foo:\"bar\"]"));
                 assertNull(storeStatus);
                 assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
                     explanation.getFinalExplanation());
                 assertEquals(ClusterAllocationExplanation.FinalDecision.NO, finalDecision);
             } else if (barAttrNode.equals(nodeName)) {
-                assertThat(d.toString(), containsString("node does not match [index.routing.allocation.include] filters [foo:\"bar\"]"));
+                assertThat(d.toString(), containsString("node does not match index setting [index.routing.allocation.include] " +
+                    "filters [foo:\"bar\"]"));
                 barAttrWeight = weight;
                 assertNull(storeStatus);
                 assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
@@ -786,8 +786,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         } catch (IllegalArgumentException e) {
             assertThat("can't be allocated because there isn't enough room: " + e.getMessage(),
                 e.getMessage(),
-                containsString("the node is above the low watermark [cluster.routing.allocation.disk.watermark.low=0.7], using " +
-                    "more disk space than the maximum allowed [70.0%], actual free: [26.0%]"));
+                containsString("the node is above the low watermark cluster setting " +
+                    "[cluster.routing.allocation.disk.watermark.low=0.7], using more disk space than the maximum " +
+                    "allowed [70.0%], actual free: [26.0%]"));
         }
 
     }
@@ -858,7 +859,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
         assertThat(decision.type(), equalTo(Decision.Type.NO));
         assertThat(((Decision.Single) decision).getExplanation(), containsString(
-            "the shard cannot remain on this node because it is above the high watermark " +
+            "the shard cannot remain on this node because it is above the high watermark cluster setting " +
             "[cluster.routing.allocation.disk.watermark.high=70%] and there is less than the required [30.0%] free disk on node, " +
             "actual free: [20.0%]"));
 
@@ -890,12 +891,12 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         assertThat(decision.type(), equalTo(Decision.Type.NO));
         if (fooRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE) {
             assertThat(((Decision.Single) decision).getExplanation(), containsString(
-                "the node is above the high watermark [cluster.routing.allocation.disk.watermark.high=70%], using more disk space than " +
-                    "the maximum allowed [70.0%], actual free: [20.0%]"));
+                "the node is above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=70%], using " +
+                    "more disk space than the maximum allowed [70.0%], actual free: [20.0%]"));
         } else {
             assertThat(((Decision.Single) decision).getExplanation(), containsString(
-                "the node is above the low watermark [cluster.routing.allocation.disk.watermark.low=60%], using more disk space than " +
-                    "the maximum allowed [60.0%], actual free: [20.0%]"));
+                "the node is above the low watermark cluster setting [cluster.routing.allocation.disk.watermark.low=60%], using more " +
+                    "disk space than the maximum allowed [60.0%], actual free: [20.0%]"));
         }
 
         // Creating AllocationService instance and the services it depends on...
@@ -106,8 +106,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
         decision = decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation);
         assertEquals(mostAvailableUsage.toString(), Decision.Type.NO, decision.type());
         assertThat(((Decision.Single) decision).getExplanation(), containsString(
-            "the node is above the high watermark [cluster.routing.allocation.disk.watermark.high=90%], using more disk space than " +
-                "the maximum allowed [90.0%]"));
+            "the node is above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=90%], using more " +
+                "disk space than the maximum allowed [90.0%]"));
     }
 
     public void testCanRemainUsesLeastAvailableSpace() {
@@ -181,8 +181,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
         decision = decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation);
         assertEquals(Decision.Type.NO, decision.type());
         assertThat(((Decision.Single) decision).getExplanation(), containsString("the shard cannot remain on this node because it is " +
-            "above the high watermark [cluster.routing.allocation.disk.watermark.high=90%] and there is less than the required [10.0%] " +
-            "free disk on node, actual free: [9.0%]"));
+            "above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=90%] and there is less than " +
+            "the required [10.0%] free disk on node, actual free: [9.0%]"));
         try {
            decider.canRemain(test_0, new RoutingNode("node_1", node_1), allocation);
            fail("not allocated on this node");