Use disassociate in preference to deassociate (#37704)
parent 2439f68745
commit d193ca8aae
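The change is a mechanical rename of deassociateDeadNodes to disassociateDeadNodes across AllocationService, PersistentTasksCustomMetaData, and their callers and tests. As an illustrative before/after of a typical call site (variable names are assumed here, not quoted from any single file):

    // before this commit
    clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute");
    // after this commit
    clusterState = allocationService.disassociateDeadNodes(clusterState, true, "reroute");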
@@ -188,8 +188,8 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecut
             .blocks(currentState.blocks())
             .removeGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)).build();
        logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes());
-       tmpState = PersistentTasksCustomMetaData.deassociateDeadNodes(tmpState);
-       return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false, "removed dead nodes on election"));
+       tmpState = PersistentTasksCustomMetaData.disassociateDeadNodes(tmpState);
+       return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election"));
    }

    @Override
@@ -92,9 +92,9 @@ public class NodeRemovalClusterStateTaskExecutor implements ClusterStateTaskExec

    protected ClusterTasksResult<Task> getTaskClusterTasksResult(ClusterState currentState, List<Task> tasks,
                                                                 ClusterState remainingNodesClusterState) {
-       ClusterState ptasksDeassociatedState = PersistentTasksCustomMetaData.deassociateDeadNodes(remainingNodesClusterState);
+       ClusterState ptasksDisassociatedState = PersistentTasksCustomMetaData.disassociateDeadNodes(remainingNodesClusterState);
        final ClusterTasksResult.Builder<Task> resultBuilder = ClusterTasksResult.<Task>builder().successes(tasks);
-       return resultBuilder.build(allocationService.deassociateDeadNodes(ptasksDeassociatedState, true, describeTasks(tasks)));
+       return resultBuilder.build(allocationService.disassociateDeadNodes(ptasksDisassociatedState, true, describeTasks(tasks)));
    }

    // visible for testing
@@ -45,7 +45,7 @@ import java.util.concurrent.atomic.AtomicReference;
 * the delay marker). These are shards that have become unassigned due to a node leaving
 * and which were assigned the delay marker based on the index delay setting
 * {@link UnassignedInfo#INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING}
- * (see {@link AllocationService#deassociateDeadNodes(RoutingAllocation)}).
+ * (see {@link AllocationService#disassociateDeadNodes(RoutingAllocation)}.
 * This class is responsible for choosing the next (closest) delay expiration of a
 * delayed shard to schedule a reroute to remove the delay marker.
 * The actual removal of the delay marker happens in
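Note: the delay marker described in this javadoc is driven by the per-index setting behind UnassignedInfo#INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING. A minimal sketch of configuring it when building index settings (the one-minute value is an arbitrary example, not part of this commit):

    import org.elasticsearch.cluster.routing.UnassignedInfo;
    import org.elasticsearch.common.settings.Settings;

    // shards on a departed node stay delayed-unassigned for this long before being reallocated
    Settings indexSettings = Settings.builder()
        .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "1m")
        .build();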
@@ -327,7 +327,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
     *
     */
    public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) {
-       // It's possible for replicaNodeVersion to be null, when deassociating dead nodes
+       // It's possible for replicaNodeVersion to be null, when disassociating dead nodes
        // that have been removed, the shards are failed, and part of the shard failing
        // calls this method with an out-of-date RoutingNodes, where the version might not
        // be accessible. Therefore, we need to protect against the version being null
@@ -216,7 +216,7 @@ public class AllocationService {
     * unassigned an shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas
     * if needed.
     */
-   public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
+   public ClusterState disassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
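As the test changes further down illustrate, callers of this renamed public method typically remove the departed node from the DiscoveryNodes first and then let the allocation service clean up routing. A minimal sketch, assuming clusterState and allocationService are in scope:

    // drop the dead node from the node set, then unassign the shards it held;
    // passing reroute=true also triggers an immediate reroute of the newly unassigned shards
    ClusterState withoutDeadNode = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2"))
        .build();
    clusterState = allocationService.disassociateDeadNodes(withoutDeadNode, true, "node left");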
@@ -224,7 +224,7 @@ public class AllocationService {
                clusterInfoService.getClusterInfo(), currentNanoTime());

        // first, clear from the shards any node id they used to belong to that is now dead
-       deassociateDeadNodes(allocation);
+       disassociateDeadNodes(allocation);

        if (allocation.routingNodesChanged()) {
            clusterState = buildResult(clusterState, allocation);
@@ -400,7 +400,7 @@ public class AllocationService {
    }

    private void reroute(RoutingAllocation allocation) {
-       assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See deassociateDeadNodes";
+       assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See disassociateDeadNodes";
        assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metaData(), allocation.nodes()).isEmpty() :
            "auto-expand replicas out of sync with number of nodes in the cluster";

@@ -414,7 +414,7 @@ public class AllocationService {
        assert RoutingNodes.assertShardStats(allocation.routingNodes());
    }

-   private void deassociateDeadNodes(RoutingAllocation allocation) {
+   private void disassociateDeadNodes(RoutingAllocation allocation) {
        for (Iterator<RoutingNode> it = allocation.routingNodes().mutableIterator(); it.hasNext(); ) {
            RoutingNode node = it.next();
            if (allocation.nodes().getDataNodes().containsKey(node.nodeId())) {
@@ -225,7 +225,7 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable<M
     * @return If no changes the argument {@code clusterState} is returned else
     *         a copy with the modified tasks
     */
-   public static ClusterState deassociateDeadNodes(ClusterState clusterState) {
+   public static ClusterState disassociateDeadNodes(ClusterState clusterState) {
        PersistentTasksCustomMetaData tasks = getPersistentTasksCustomMetaData(clusterState);
        if (tasks == null) {
            return clusterState;
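Per the javadoc above, the method returns the argument cluster state itself when no persistent task references a node that has left; the sameInstance assertions in the tests further down rely on this. An illustrative caller, assuming clusterState is in scope:

    ClusterState updated = PersistentTasksCustomMetaData.disassociateDeadNodes(clusterState);
    if (updated == clusterState) {
        // no persistent task was assigned to a dead node; nothing changed, nothing to publish
    }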
@@ -70,7 +70,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {

    public void testRerouteAfterRemovingNodes() throws Exception {
        final AllocationService allocationService = mock(AllocationService.class);
-       when(allocationService.deassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class)))
+       when(allocationService.disassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class)))
            .thenAnswer(im -> im.getArguments()[0]);

        final AtomicReference<ClusterState> remainingNodesClusterState = new AtomicReference<>();
@@ -102,7 +102,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
        final ClusterStateTaskExecutor.ClusterTasksResult<NodeRemovalClusterStateTaskExecutor.Task> result =
            executor.execute(clusterState, tasks);

-       verify(allocationService).deassociateDeadNodes(eq(remainingNodesClusterState.get()), eq(true), any(String.class));
+       verify(allocationService).disassociateDeadNodes(eq(remainingNodesClusterState.get()), eq(true), any(String.class));

        for (final NodeRemovalClusterStateTaskExecutor.Task task : tasks) {
            assertNull(result.resultingState.nodes().get(task.node().getId()));
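Because these tests mock AllocationService, both the stubbing and the verification have to move to the new method name; condensed from the two hunks above (illustrative, not a complete test):

    AllocationService allocationService = mock(AllocationService.class);
    when(allocationService.disassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class)))
        .thenAnswer(im -> im.getArguments()[0]);  // echo back the input cluster state
    // ... run the executor under test, then:
    verify(allocationService).disassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class));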
@@ -104,7 +104,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
            nodes.add(newNode("node3"));
        }
        clusterState = ClusterState.builder(clusterState).nodes(nodes).build();
-       clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocationService.disassociateDeadNodes(clusterState, true, "reroute");
        ClusterState newState = clusterState;
        List<ShardRouting> unassignedShards = newState.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED);
        if (nodeAvailableForAllocation) {
@@ -153,7 +153,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {

        // remove node that has replica and reroute
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(nodeId)).build();
-       clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocationService.disassociateDeadNodes(clusterState, true, "reroute");
        ClusterState stateWithDelayedShard = clusterState;
        // make sure the replica is marked as delayed (i.e. not reallocated)
        assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard));
@@ -264,7 +264,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
            .build();
        // make sure both replicas are marked as delayed (i.e. not reallocated)
        allocationService.setNanoTimeOverride(baseTimestampNanos);
-       clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocationService.disassociateDeadNodes(clusterState, true, "reroute");
        final ClusterState stateWithDelayedShards = clusterState;
        assertEquals(2, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShards));
        RoutingNodes.UnassignedShards.UnassignedIterator iter = stateWithDelayedShards.getRoutingNodes().unassigned().iterator();
@@ -401,7 +401,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
        // remove node that has replica and reroute
        clusterState = ClusterState.builder(clusterState).nodes(
            DiscoveryNodes.builder(clusterState.nodes()).remove(nodeIdOfFooReplica)).build();
-       clusterState = allocationService.deassociateDeadNodes(clusterState, true, "fake node left");
+       clusterState = allocationService.disassociateDeadNodes(clusterState, true, "fake node left");
        ClusterState stateWithDelayedShard = clusterState;
        // make sure the replica is marked as delayed (i.e. not reallocated)
        assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard));
@@ -444,7 +444,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
        // remove node that has replica and reroute
        clusterState = ClusterState.builder(stateWithDelayedShard).nodes(
            DiscoveryNodes.builder(stateWithDelayedShard.nodes()).remove(nodeIdOfBarReplica)).build();
-       ClusterState stateWithShorterDelay = allocationService.deassociateDeadNodes(clusterState, true, "fake node left");
+       ClusterState stateWithShorterDelay = allocationService.disassociateDeadNodes(clusterState, true, "fake node left");
        delayedAllocationService.setNanoTimeOverride(clusterChangeEventTimestampNanos);
        delayedAllocationService.clusterChanged(
            new ClusterChangedEvent("fake node left", stateWithShorterDelay, stateWithDelayedShard));
@@ -241,7 +241,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
        assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
        // remove node2 and reroute
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
        // verify that NODE_LEAVE is the reason for meta
        assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1));
@@ -332,7 +332,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
        // remove node2 and reroute
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
        // make sure both replicas are marked as delayed (i.e. not reallocated)
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
        assertThat(clusterState.toString(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2));
    }

@@ -364,7 +364,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
        final long baseTime = System.nanoTime();
        allocation.setNanoTimeOverride(baseTime);
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        final long delta = randomBoolean() ? 0 : randomInt((int) expectMinDelaySettingsNanos - 1);

@@ -341,7 +341,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
        }

        clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
-       clusterState = service.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = service.disassociateDeadNodes(clusterState, true, "reroute");

        logger.info("start all the primary shards, replicas will start initializing");
        RoutingNodes routingNodes = clusterState.getRoutingNodes();
@@ -181,7 +181,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {

        clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
        if (removed) {
-           clusterState = strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes");
+           clusterState = strategy.disassociateDeadNodes(clusterState, randomBoolean(), "removed nodes");
        }

        logger.info("start all the primary shards, replicas will start initializing");
@@ -84,7 +84,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
            .add(newNode(nodeIdRemaining))
        ).build();

-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().primary(), equalTo(true));
        assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().state(), equalTo(STARTED));
@@ -154,7 +154,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
            .add(newNode(origPrimaryNodeId))
            .add(newNode(origReplicaNodeId))
        ).build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED));
        assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED));
@@ -224,7 +224,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
            .add(newNode("node3"))
            .add(newNode(origReplicaNodeId))
        ).build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED));
        assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING));
@@ -96,7 +96,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest
            indexShardRoutingTable.primaryShard().currentNodeId());
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
-       clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute");

        logger.info("make sure all the primary shards are active");
        assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().active(), equalTo(true));
@@ -111,7 +111,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase {
                .remove(clusterState.routingTable().index("test2").shard(0).primaryShard().currentNodeId())
            )
            .build();
-       clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute");
        routingNodes = clusterState.getRoutingNodes();

        for (RoutingNode routingNode : routingNodes) {
@@ -101,7 +101,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove("node1"))
            .build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        // in-sync allocation ids should not be updated
        assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(3));
@@ -110,7 +110,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove("node2").remove("node3"))
            .build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        // in-sync allocation ids should not be updated
        assertThat(clusterState.getRoutingTable().shardsWithState(UNASSIGNED).size(), equalTo(3));
@@ -145,7 +145,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
    /**
     * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica.
     * The primary instructs master to fail replica before acknowledging write to client. In the meanwhile, the node of the replica was
-    * removed from the cluster (deassociateDeadNodes). This means that the ShardRouting of the replica was failed, but it's allocation
+    * removed from the cluster (disassociateDeadNodes). This means that the ShardRouting of the replica was failed, but it's allocation
     * id is still part of the in-sync set. We have to make sure that the failShard request from the primary removes the allocation id
     * from the in-sync set.
     */
@@ -158,7 +158,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(replicaShard.currentNodeId()))
            .build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
        assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(2));

        logger.info("fail replica (for which there is no shard routing in the CS anymore)");
@@ -228,7 +228,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(replicaShard.currentNodeId()))
            .build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        // in-sync allocation ids should not be updated
        assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0));
@@ -248,7 +248,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
            clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
                .remove(replicaShard.currentNodeId()))
                .build();
-           clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+           clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
        }

        // in-sync allocation set is bounded
@@ -276,7 +276,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(replicaShard.currentNodeId()))
            .build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        // in-sync allocation ids should not be updated
        assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0));
@@ -285,7 +285,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(primaryShard.currentNodeId()))
            .build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        // in-sync allocation ids should not be updated
        assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0));
@@ -327,7 +327,7 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(replicaShard.currentNodeId()))
            .build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");

        // in-sync allocation ids should not be updated
        assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0));
@@ -388,7 +388,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
    private ClusterState stabilize(ClusterState clusterState, AllocationService service) {
        logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes());

-       clusterState = service.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = service.disassociateDeadNodes(clusterState, true, "reroute");
        RoutingNodes routingNodes = clusterState.getRoutingNodes();
        assertRecoveryNodeVersions(routingNodes);

@@ -94,7 +94,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation
            .build();
        clusterState = ClusterState.builder(clusterState).metaData(metaData)
            .nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
-       clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute");

        logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned",
            numberOfShards, numberOfShards);
@@ -77,7 +77,7 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .add(newNode("node3")).remove("node1")).build();
        RoutingTable prevRoutingTable = clusterState.routingTable();
-       clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute");
        routingNodes = clusterState.getRoutingNodes();
        routingTable = clusterState.routingTable();

@@ -129,7 +129,7 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
        String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
        String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode(nodeIdRemaining))).build();
-       clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
        routingNodes = clusterState.getRoutingNodes();

        assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(1));
@@ -130,7 +130,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
            stateBuilder.nodes(newNodesBuilder.build());
            clusterState = stateBuilder.build();
            if (nodesRemoved) {
-               clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+               clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute");
            } else {
                clusterState = strategy.reroute(clusterState, "reroute");
            }
@@ -334,7 +334,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
        IndexShardRoutingTable indexShardRoutingTable = clusterState.routingTable().index("test").shard(0);
        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
-       clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+       clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute");
        routingNodes = clusterState.getRoutingNodes();

        assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -117,7 +117,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
        logger.info("Killing node1 where the shard is, checking the shard is unassigned");

        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
-       newState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+       newState = strategy.disassociateDeadNodes(clusterState, true, "reroute");
        assertThat(newState, not(equalTo(clusterState)));
        clusterState = newState;

@@ -130,7 +130,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase {
        logger.info("Kill node1, backup shard should become primary");

        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
-       newState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+       newState = strategy.disassociateDeadNodes(clusterState, true, "reroute");
        assertThat(newState, not(equalTo(clusterState)));
        clusterState = newState;

@@ -111,7 +111,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {

        // now remove the node of the other copy and fail the current
        DiscoveryNode node1 = state.nodes().resolveNode("node1");
-       state = service.deassociateDeadNodes(
+       state = service.disassociateDeadNodes(
            ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).remove("node1")).build(),
            true, "test");
        state = service.applyFailedShard(state, routingTable.index("idx").shard(0).primaryShard(), randomBoolean());
@@ -136,7 +136,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
                builder.nodes(DiscoveryNodes.builder(currentState.nodes()).remove("_non_existent"));

                currentState = builder.build();
-               return allocationService.deassociateDeadNodes(currentState, true, "reroute");
+               return allocationService.disassociateDeadNodes(currentState, true, "reroute");

            }

@@ -317,7 +317,7 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ

    public void testDisassociateDeadNodes_givenNoPersistentTasks() {
        ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")).build();
-       ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState);
+       ClusterState returnedState = PersistentTasksCustomMetaData.disassociateDeadNodes(originalState);
        assertThat(originalState, sameInstance(returnedState));
    }

@@ -337,7 +337,7 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ
            .nodes(nodes)
            .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()))
            .build();
-       ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState);
+       ClusterState returnedState = PersistentTasksCustomMetaData.disassociateDeadNodes(originalState);
        assertThat(originalState, sameInstance(returnedState));

        PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState);
@@ -363,7 +363,7 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ
            .nodes(nodes)
            .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()))
            .build();
-       ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState);
+       ClusterState returnedState = PersistentTasksCustomMetaData.disassociateDeadNodes(originalState);
        assertThat(originalState, not(sameInstance(returnedState)));

        PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState);