Add more logging to MockDiskUsagesIT (#42424)
This commit adds a log message containing the routing table, emitted on each iteration of the failing assertBusy() in #40174. It also modernizes the code a bit.
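The point of the change, in miniature: fetch a fresh ClusterState on every retry of the busy-wait and log its routing table before asserting, so that a timed-out wait leaves a record of how allocation evolved. Below is a minimal sketch of that pattern, assuming an ESIntegTestCase subclass; the class name, method name, and assertion are illustrative, not part of the commit:

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.test.ESIntegTestCase;

    public class RoutingTableLoggingSketchIT extends ESIntegTestCase {
        public void testLogsRoutingTableWhileWaiting() throws Exception {
            createIndex("test");
            assertBusy(() -> {
                // Re-fetch the cluster state on every iteration of the busy-wait...
                final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
                // ...and log the routing table before asserting, so a timeout still
                // leaves per-iteration diagnostics in the test log.
                logger.info("--> {}", clusterState.routingTable());
                assertTrue(clusterState.routingTable().hasIndex("test")); // hypothetical condition
            });
        }
    }

Logging before the assertions matters: assertBusy only reports the final failure, so the per-iteration log line is the only visibility into the intermediate cluster states.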
commit c0974a9813
parent f3c33d6d96
MockDiskUsagesIT.java
@@ -19,10 +19,9 @@
 
 package org.elasticsearch.cluster.routing.allocation.decider;
 
-import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.DiskUsage;
 import org.elasticsearch.cluster.MockInternalClusterInfoService;
 import org.elasticsearch.cluster.routing.RoutingNode;
@@ -33,10 +32,9 @@ import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -50,18 +48,12 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         // Use the mock internal cluster info service, which has fake-able disk usages
-        return Arrays.asList(MockInternalClusterInfoService.TestPlugin.class);
+        return Collections.singletonList(MockInternalClusterInfoService.TestPlugin.class);
     }
 
     public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception {
         List<String> nodes = internalCluster().startNodes(3);
 
-        // Wait for all 3 nodes to be up
-        assertBusy(() -> {
-            NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get();
-            assertThat(resp.getNodes().size(), equalTo(3));
-        });
-
         // Start with all nodes at 50% usage
         final MockInternalClusterInfoService cis = (MockInternalClusterInfoService)
             internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName());
@@ -75,32 +67,30 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "20b" : "80%")
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%")
-            .put(
-                DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(),
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(),
                 watermarkBytes ? "0b" : "100%")
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get();
         // Create an index with 10 shards so we can check allocation for it
         prepareCreate("test").setSettings(Settings.builder()
             .put("number_of_shards", 10)
-            .put("number_of_replicas", 0)
-            .put("index.routing.allocation.exclude._name", "")).get();
+            .put("number_of_replicas", 0)).get();
         ensureGreen("test");
 
         // Block until the "fake" cluster info is retrieved at least once
         assertBusy(() -> {
-            ClusterInfo info = cis.getClusterInfo();
+            final ClusterInfo info = cis.getClusterInfo();
             logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size());
             assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0));
         });
 
         final List<String> realNodeNames = new ArrayList<>();
-        ClusterStateResponse resp = client().admin().cluster().prepareState().get();
-        Iterator<RoutingNode> iter = resp.getState().getRoutingNodes().iterator();
-        while (iter.hasNext()) {
-            RoutingNode node = iter.next();
-            realNodeNames.add(node.nodeId());
-            logger.info("--> node {} has {} shards",
-                node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
-        }
+        {
+            final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+            for (final RoutingNode node : clusterState.getRoutingNodes()) {
+                realNodeNames.add(node.nodeId());
+                logger.info("--> node {} has {} shards",
+                    node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+            }
+        }
 
         // Update the disk usages so one node has now passed the high watermark
@@ -108,17 +98,15 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50));
         cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 0)); // nothing free on node3
 
-        // Retrieve the count of shards on each node
-        final Map<String, Integer> nodesToShardCount = new HashMap<>();
-
         assertBusy(() -> {
-            ClusterStateResponse resp12 = client().admin().cluster().prepareState().get();
-            Iterator<RoutingNode> iter12 = resp12.getState().getRoutingNodes().iterator();
-            while (iter12.hasNext()) {
-                RoutingNode node = iter12.next();
+            final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+            logger.info("--> {}", clusterState.routingTable());
+
+            final Map<String, Integer> nodesToShardCount = new HashMap<>();
+            for (final RoutingNode node : clusterState.getRoutingNodes()) {
                 logger.info("--> node {} has {} shards",
-                    node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
-                nodesToShardCount.put(node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+                    node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+                nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
             }
             assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5));
             assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5));
@@ -130,17 +118,13 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50));
         cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 50)); // node3 has free space now
 
-        // Retrieve the count of shards on each node
-        nodesToShardCount.clear();
-
         assertBusy(() -> {
-            ClusterStateResponse resp1 = client().admin().cluster().prepareState().get();
-            Iterator<RoutingNode> iter1 = resp1.getState().getRoutingNodes().iterator();
-            while (iter1.hasNext()) {
-                RoutingNode node = iter1.next();
+            final Map<String, Integer> nodesToShardCount = new HashMap<>();
+            final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+            for (final RoutingNode node : clusterState.getRoutingNodes()) {
                 logger.info("--> node {} has {} shards",
-                    node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
-                nodesToShardCount.put(node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+                    node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+                nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
             }
             assertThat("node1 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(0)), greaterThanOrEqualTo(3));
            assertThat("node2 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(1)), greaterThanOrEqualTo(3));