diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
index 1ad18c1f69f..8565beb1b89 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
@@ -19,10 +19,9 @@
 
 package org.elasticsearch.cluster.routing.allocation.decider;
 
-import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.DiskUsage;
 import org.elasticsearch.cluster.MockInternalClusterInfoService;
 import org.elasticsearch.cluster.routing.RoutingNode;
@@ -33,10 +32,9 @@ import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -50,21 +48,15 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         // Use the mock internal cluster info service, which has fake-able disk usages
-        return Arrays.asList(MockInternalClusterInfoService.TestPlugin.class);
+        return Collections.singletonList(MockInternalClusterInfoService.TestPlugin.class);
     }
 
     public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception {
         List<String> nodes = internalCluster().startNodes(3);
 
-        // Wait for all 3 nodes to be up
-        assertBusy(() -> {
-            NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get();
-            assertThat(resp.getNodes().size(), equalTo(3));
-        });
-
         // Start with all nodes at 50% usage
         final MockInternalClusterInfoService cis = (MockInternalClusterInfoService)
-                internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName());
+            internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName());
         cis.setUpdateFrequency(TimeValue.timeValueMillis(200));
         cis.onMaster();
         cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50));
@@ -73,34 +65,32 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         final boolean watermarkBytes = randomBoolean(); // we have to consistently use bytes or percentage for the disk watermark settings
 
         client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
-                .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "20b" : "80%")
-                .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%")
-                .put(
-                    DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(),
-                    watermarkBytes ? "0b" : "100%")
-                .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get();
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "20b" : "80%")
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%")
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(),
+                watermarkBytes ? "0b" : "100%")
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get();
 
         // Create an index with 10 shards so we can check allocation for it
         prepareCreate("test").setSettings(Settings.builder()
-                .put("number_of_shards", 10)
-                .put("number_of_replicas", 0)
-                .put("index.routing.allocation.exclude._name", "")).get();
+            .put("number_of_shards", 10)
+            .put("number_of_replicas", 0)).get();
         ensureGreen("test");
 
         // Block until the "fake" cluster info is retrieved at least once
         assertBusy(() -> {
-            ClusterInfo info = cis.getClusterInfo();
+            final ClusterInfo info = cis.getClusterInfo();
             logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size());
             assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0));
         });
 
         final List<String> realNodeNames = new ArrayList<>();
-        ClusterStateResponse resp = client().admin().cluster().prepareState().get();
-        Iterator<RoutingNode> iter = resp.getState().getRoutingNodes().iterator();
-        while (iter.hasNext()) {
-            RoutingNode node = iter.next();
-            realNodeNames.add(node.nodeId());
-            logger.info("--> node {} has {} shards",
-                node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+        {
+            final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+            for (final RoutingNode node : clusterState.getRoutingNodes()) {
+                realNodeNames.add(node.nodeId());
+                logger.info("--> node {} has {} shards",
+                    node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+            }
         }
 
         // Update the disk usages so one node has now passed the high watermark
@@ -108,17 +98,15 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50));
         cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 0)); // nothing free on node3
 
-        // Retrieve the count of shards on each node
-        final Map<String, Integer> nodesToShardCount = new HashMap<>();
-        assertBusy(() -> {
-            ClusterStateResponse resp12 = client().admin().cluster().prepareState().get();
-            Iterator<RoutingNode> iter12 = resp12.getState().getRoutingNodes().iterator();
-            while (iter12.hasNext()) {
-                RoutingNode node = iter12.next();
+        final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+        logger.info("--> {}", clusterState.routingTable());
+
+        final Map<String, Integer> nodesToShardCount = new HashMap<>();
+        for (final RoutingNode node : clusterState.getRoutingNodes()) {
             logger.info("--> node {} has {} shards",
-                node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
-            nodesToShardCount.put(node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+                node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+            nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
         }
         assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5));
         assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5));
@@ -130,17 +118,13 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50));
         cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 50)); // node3 has free space now
 
-        // Retrieve the count of shards on each node
-        nodesToShardCount.clear();
-        assertBusy(() -> {
-            ClusterStateResponse resp1 = client().admin().cluster().prepareState().get();
-            Iterator<RoutingNode> iter1 = resp1.getState().getRoutingNodes().iterator();
-            while (iter1.hasNext()) {
-                RoutingNode node = iter1.next();
+        final Map<String, Integer> nodesToShardCount = new HashMap<>();
+        final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+        for (final RoutingNode node : clusterState.getRoutingNodes()) {
             logger.info("--> node {} has {} shards",
-                node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
-            nodesToShardCount.put(node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+                node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+            nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
         }
         assertThat("node1 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(0)), greaterThanOrEqualTo(3));
        assertThat("node2 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(1)), greaterThanOrEqualTo(3));