@@ -0,0 +1,727 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.collect.ImmutableMap;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.testng.annotations.Test;

import static org.elasticsearch.cluster.ClusterState.*;
import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
import static org.elasticsearch.cluster.metadata.MetaData.*;
import static org.elasticsearch.cluster.node.DiscoveryNodes.*;
import static org.elasticsearch.cluster.routing.RoutingBuilders.*;
import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
import static org.elasticsearch.cluster.routing.allocation.RoutingAllocationTests.*;
import static org.elasticsearch.common.settings.ImmutableSettings.*;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;

/**
 * Tests for shard allocation awareness: shard copies should be spread across the values of the
 * configured awareness attribute (here {@code rack_id}), and forced awareness should keep replicas
 * unassigned until a second attribute value becomes available.
 */
@Test
public class AwarenessAllocationTests {

    private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);

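    /**
     * One shard with one replica, both allocated on two nodes in rack 1. Adding a node in rack 2
     * should relocate exactly one copy to it; adding a third rack afterwards should not move
     * anything, since both copies already sit on different rack values.
     */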
    @Test public void moveShardOnceNewNodeWithAttributeAdded1() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test").initializeEmpty(metaData.index("test")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> start the shards (replicas)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, make sure nothing moves");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        assertThat(routingTable, sameInstance(clusterState.routingTable()));
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
    }

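    /**
     * Same scenario as above but with three nodes in rack 1: adding a node in rack 2 still
     * relocates exactly one of the two copies, and a further rack adds no movement.
     */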
    @Test public void moveShardOnceNewNodeWithAttributeAdded2() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test").initializeEmpty(metaData.index("test")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> start the shards (replicas)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, make sure nothing moves");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        assertThat(routingTable, sameInstance(clusterState.routingTable()));
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
    }

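    /**
     * Five shards with one replica each, started on two nodes in rack 1. With rebalance
     * throttling disabled, adding a node in rack 2 relocates one copy of every shard at once
     * (5 relocations), and adding a node in rack 3 triggers further rebalancing.
     */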
    @Test public void moveShardOnceNewNodeWithAttributeAdded3() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test").numberOfShards(5).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test").initializeEmpty(metaData.index("test")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> start the shards (replicas)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(5));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(5));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(5));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));

        logger.info("--> complete initializing");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> run it again, since we still might have relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, some more relocation should happen");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
    }

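    /**
     * Same as the previous test but with two indices of five shards each (20 shard copies in
     * total), checking that awareness-driven relocation behaves the same across indices.
     */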
    @Test public void moveShardOnceNewNodeWithAttributeAdded4() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test1").numberOfShards(5).numberOfReplicas(1))
                .put(newIndexMetaDataBuilder("test2").numberOfShards(5).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test1").initializeEmpty(metaData.index("test1")))
                .add(indexRoutingTable("test2").initializeEmpty(metaData.index("test2")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> start the shards (replicas)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(10));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));

        logger.info("--> complete initializing");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> run it again, since we still might have relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, some more relocation should happen");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
    }

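    /**
     * One shard with two replicas on two nodes in rack 1: only two copies can start. A node in
     * rack 2 picks up the unassigned replica, and a later node in rack 3 attracts one relocation.
     */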
    @Test public void moveShardOnceNewNodeWithAttributeAdded5() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(2))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test").initializeEmpty(metaData.index("test")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> start the shards (replicas)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, we will have another relocation");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));

        logger.info("--> make sure another reroute does not move things");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
    }

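    /**
     * One shard with three replicas, all started on four nodes in rack 1. Each additional rack
     * (node5 in rack 2, node6 in rack 3) pulls exactly one copy over via relocation.
     */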
    @Test public void moveShardOnceNewNodeWithAttributeAdded6() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(3))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test").initializeEmpty(metaData.index("test")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node4", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> start the shards (replicas)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node5", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node5"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, we will have another relocation");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node6", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(3));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node6"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));

        logger.info("--> make sure another reroute does not move things");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
    }

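    /**
     * Forced awareness ({@code force.rack_id.values=1,2}): with only rack 1 present the replica
     * is not allocated at all; it initializes only once a node in rack 2 joins.
     */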
    @Test public void fullAwareness1() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test").initializeEmpty(metaData.index("test")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> replica will not start because we have only one rack value");
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, make sure nothing moves");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        assertThat(routingTable, sameInstance(clusterState.routingTable()));
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
    }

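    /**
     * Forced awareness with three nodes in rack 1: the replica still stays unassigned until a
     * node in rack 2 is added, then initializes on it.
     */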
    @Test public void fullAwareness2() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test").initializeEmpty(metaData.index("test")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> replica will not start because we have only one rack value");
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node4"));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, make sure nothing moves");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        assertThat(routingTable, sameInstance(clusterState.routingTable()));
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
    }

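    /**
     * Forced awareness with two indices of five shards each: only the 10 primaries start on
     * rack 1; all 10 replicas initialize on the first rack 2 node, and a later rack 3 node
     * triggers additional rebalancing relocations.
     */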
    @Test public void fullAwareness3() {
        AllocationService strategy = new AllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
                .put("cluster.routing.allocation.allow_rebalance", "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

        logger.info("Building initial routing table");

        MetaData metaData = newMetaDataBuilder()
                .put(newIndexMetaDataBuilder("test1").numberOfShards(5).numberOfReplicas(1))
                .put(newIndexMetaDataBuilder("test2").numberOfShards(5).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = routingTable()
                .add(indexRoutingTable("test1").initializeEmpty(metaData.index("test1")))
                .add(indexRoutingTable("test2").initializeEmpty(metaData.index("test2")))
                .build();

        ClusterState clusterState = newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();

        logger.info("--> adding two nodes on same rack and do rerouting");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder()
                .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
                .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));

        logger.info("--> start the shards (primaries)");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));

        logger.info("--> add a new node with a new rack and reroute");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));

        logger.info("--> complete initializing");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        logger.info("--> run it again, since we still might have relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));

        logger.info("--> add another node with a new rack, some more relocation should happen");
        clusterState = newClusterStateBuilder().state(clusterState).nodes(newNodesBuilder().putAll(clusterState.nodes())
                .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
        ).build();
        routingTable = strategy.reroute(clusterState).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
        assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));

        logger.info("--> complete relocation");
        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
        clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));

        logger.info("--> do another reroute, make sure nothing moves");
        assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
    }
}