Remove DiscoveryNode#id in favour of existing DiscoveryNode#getId

Authored by javanna on 2016-03-30 14:42:15 +02:00; committed by Luca Cavanna
Parent: eed885eeab
Commit: 9889f10e5e
74 changed files with 207 additions and 218 deletions
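
The change is a mechanical accessor rename: DiscoveryNode previously exposed both id() and the equivalent getId() (which delegated to id()); this commit deletes DiscoveryNode#id, has DiscoveryNode#getId return the nodeId field directly, and switches every call site to the surviving getter. A minimal before/after sketch of a typical call site follows (the helper class and method are illustrative, not part of the diff):

import org.elasticsearch.cluster.node.DiscoveryNode;

// Illustrative sketch of the call-site migration applied across the 74 changed files below.
class NodeIdAccessExample {
    static String describe(DiscoveryNode node) {
        // Before this commit: String nodeId = node.id();   (accessor removed here)
        // After this commit only the getter-style accessor remains:
        String nodeId = node.getId();
        return "node [" + nodeId + "]";
    }
}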


@ -68,7 +68,7 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
builder.startObject("nodes");
for (NodeInfo nodeInfo : this) {
builder.startObject(nodeInfo.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(nodeInfo.getNode().getId(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("name", nodeInfo.getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("transport_address", nodeInfo.getNode().address().toString());


@ -65,7 +65,7 @@ public class NodesStatsResponse extends BaseNodesResponse<NodeStats> implements
builder.startObject("nodes");
for (NodeStats nodeStats : this) {
builder.startObject(nodeStats.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(nodeStats.getNode().getId(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("timestamp", nodeStats.getTimestamp());
nodeStats.toXContent(builder, params);


@ -86,7 +86,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.NODES);
for (DiscoveryNode node : nodes) {
builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
builder.field(Fields.NAME, node.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.endObject();
}


@ -107,7 +107,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
protected NodeSnapshotStatus nodeOperation(NodeRequest request) {
Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
try {
String nodeId = clusterService.localNode().id();
String nodeId = clusterService.localNode().getId();
for (SnapshotId snapshotId : request.snapshotIds) {
Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshotId);
if (shardsStatus == null) {


@ -194,7 +194,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
}
private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
for (ShardRouting shardRouting : routingNodes.node(node.id())) {
for (ShardRouting shardRouting : routingNodes.node(node.getId())) {
ShardId shardId = shardRouting.shardId();
if (shardId.id() == shardID && shardId.getIndexName().equals(index)) {
if (shardRouting.primary()) {


@ -155,7 +155,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
@Override
public void onFailure(Throwable t) {
onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
onFirstPhaseResult(shardIndex, shard, node.getId(), shardIt, t);
}
});
}


@ -299,7 +299,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
try {
NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
if (task != null) {
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
taskManager.registerChildTask(task, node.getId());
}
transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler<NodeResponse>() {
@ -330,7 +330,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
if (logger.isTraceEnabled()) {
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
logger.trace("received response for [{}] from node [{}]", actionName, node.getId());
}
// this is defensive to protect against the possibility of double invocation
@ -344,7 +344,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
}
protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
String nodeId = node.id();
String nodeId = node.getId();
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
}


@ -81,7 +81,7 @@ public abstract class BaseNodesResponse<TNodeResponse extends BaseNodeResponse>
if (nodesMap == null) {
nodesMap = new HashMap<>();
for (TNodeResponse nodeResponse : nodes) {
nodesMap.put(nodeResponse.getNode().id(), nodeResponse);
nodesMap.put(nodeResponse.getNode().getId(), nodeResponse);
}
}
return nodesMap;


@ -161,7 +161,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
} else {
ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
if (task != null) {
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
taskManager.registerChildTask(task, node.getId());
}
@ -178,7 +178,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
@Override
public void handleException(TransportException exp) {
onFailure(idx, node.id(), exp);
onFailure(idx, node.getId(), exp);
}
@Override


@ -552,7 +552,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
final Throwable cause = exp.unwrapCause();
if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
(isPrimaryAction && retryPrimaryException(cause))) {
logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.id(), request);
logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(), request);
retry(exp);
} else {
finishAsFailed(exp);


@ -113,10 +113,10 @@ public abstract class TransportTasksAction<
results.add(response);
}
} catch (Exception ex) {
exceptions.add(new TaskOperationFailure(clusterService.localNode().id(), task.getId(), ex));
exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), task.getId(), ex));
}
});
return new NodeTasksResponse(clusterService.localNode().id(), results, exceptions);
return new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions);
}
protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
@ -237,7 +237,7 @@ public abstract class TransportTasksAction<
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
} else {
NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
taskManager.registerChildTask(task, node.getId());
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
new BaseTransportResponseHandler<NodeTasksResponse>() {
@ -253,7 +253,7 @@ public abstract class TransportTasksAction<
@Override
public void handleException(TransportException exp) {
onFailure(idx, node.id(), exp);
onFailure(idx, node.getId(), exp);
}
@Override


@ -383,7 +383,7 @@ public class TransportClientNodesService extends AbstractComponent {
// use discovered information but do keep the original transport address,
// so people can control which address is exactly used.
DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(),
newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.getId(), nodeWithInfo.getHostName(),
nodeWithInfo.getHostAddress(), listedNode.getAddress(), nodeWithInfo.getAttributes(),
nodeWithInfo.getRoles(), nodeWithInfo.getVersion()));
} else {


@ -409,7 +409,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
mostAvailablePath = info;
}
}
String nodeId = nodeStats.getNode().id();
String nodeId = nodeStats.getNode().getId();
String nodeName = nodeStats.getNode().getName();
if (logger.isTraceEnabled()) {
logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",


@ -221,18 +221,11 @@ public class DiscoveryNode implements Writeable<DiscoveryNode>, ToXContent {
return address();
}
/**
* The unique id of the node.
*/
public String id() {
return nodeId;
}
/**
* The unique id of the node.
*/
public String getId() {
return id();
return nodeId;
}
/**
@ -377,7 +370,7 @@ public class DiscoveryNode implements Writeable<DiscoveryNode>, ToXContent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(id(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(getId(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("name", name());
builder.field("transport_address", address().toString());


@ -155,7 +155,7 @@ public class DiscoveryNodeFilters {
}
} else if ("_id".equals(attr)) {
for (String value : values) {
if (node.id().equals(value)) {
if (node.getId().equals(value)) {
if (opType == OpType.OR) {
return true;
}


@ -340,7 +340,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
int index = 0;
nodesIds = new String[nodes.size()];
for (DiscoveryNode node : this) {
nodesIds[index++] = node.id();
nodesIds[index++] = node.getId();
}
return nodesIds;
} else {
@ -362,14 +362,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
// not a node id, try and search by name
for (DiscoveryNode node : this) {
if (Regex.simpleMatch(nodeId, node.name())) {
resolvedNodesIds.add(node.id());
resolvedNodesIds.add(node.getId());
}
}
for (DiscoveryNode node : this) {
if (Regex.simpleMatch(nodeId, node.getHostAddress())) {
resolvedNodesIds.add(node.id());
resolvedNodesIds.add(node.getId());
} else if (Regex.simpleMatch(nodeId, node.getHostName())) {
resolvedNodesIds.add(node.id());
resolvedNodesIds.add(node.getId());
}
}
int index = nodeId.indexOf(':');
@ -400,7 +400,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
String attrName = entry.getKey();
String attrValue = entry.getValue();
if (Regex.simpleMatch(matchAttrName, attrName) && Regex.simpleMatch(matchAttrValue, attrValue)) {
resolvedNodesIds.add(node.id());
resolvedNodesIds.add(node.getId());
}
}
}
@ -415,7 +415,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
public DiscoveryNodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
for (DiscoveryNode node : this) {
if (newNodes.contains(node.id())) {
if (newNodes.contains(node.getId())) {
builder.put(node);
}
}
@ -433,12 +433,12 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
List<DiscoveryNode> removed = new ArrayList<>();
List<DiscoveryNode> added = new ArrayList<>();
for (DiscoveryNode node : other) {
if (!this.nodeExists(node.id())) {
if (!this.nodeExists(node.getId())) {
removed.add(node);
}
}
for (DiscoveryNode node : this) {
if (!other.nodeExists(node.id())) {
if (!other.nodeExists(node.getId())) {
added.add(node);
}
}
@ -539,7 +539,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
public String shortSummary() {
StringBuilder sb = new StringBuilder();
if (!removed() && masterNodeChanged()) {
if (newMasterNode.id().equals(localNodeId)) {
if (newMasterNode.getId().equals(localNodeId)) {
// we are the master, no nodes we removed, we are actually the first master
sb.append("new_master ").append(newMasterNode());
} else {
@ -567,13 +567,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}
if (added()) {
// don't print if there is one added, and it is us
if (!(addedNodes().size() == 1 && addedNodes().get(0).id().equals(localNodeId))) {
if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) {
if (removed() || masterNodeChanged()) {
sb.append(", ");
}
sb.append("added {");
for (DiscoveryNode node : addedNodes()) {
if (!node.id().equals(localNodeId)) {
if (!node.getId().equals(localNodeId)) {
// don't print ourself
sb.append(node).append(',');
}
@ -605,12 +605,12 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
builder.masterNodeId(in.readString());
}
if (localNode != null) {
builder.localNodeId(localNode.id());
builder.localNodeId(localNode.getId());
}
int size = in.readVInt();
for (int i = 0; i < size; i++) {
DiscoveryNode node = new DiscoveryNode(in);
if (localNode != null && node.id().equals(localNode.id())) {
if (localNode != null && node.getId().equals(localNode.getId())) {
// reuse the same instance of our address and local node id for faster equality
node = localNode;
}
@ -649,7 +649,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}
public Builder put(DiscoveryNode node) {
nodes.put(node.id(), node);
nodes.put(node.getId(), node);
return this;
}


@ -87,7 +87,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
Map<String, List<ShardRouting>> nodesToShards = new HashMap<>();
// fill in the nodeToShards with the "live" nodes
for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
nodesToShards.put(cursor.value.id(), new ArrayList<>());
nodesToShards.put(cursor.value.getId(), new ArrayList<>());
}
// fill in the inverse of node -> shards allocated
@ -570,7 +570,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
public void addNode(DiscoveryNode node) {
ensureMutable();
RoutingNode routingNode = new RoutingNode(node.id(), node);
RoutingNode routingNode = new RoutingNode(node.getId(), node);
nodesToShards.put(routingNode.nodeId(), routingNode);
}


@ -91,7 +91,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
RoutingNode routingNode = routingNodes.node(discoNode.getId());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}


@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import java.io.IOException;
@ -90,7 +89,7 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
RoutingNode routingNode = routingNodes.node(discoNode.getId());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}


@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import java.io.IOException;
@ -92,7 +91,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
RoutingNode routingNode = routingNodes.node(discoNode.getId());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}


@ -175,7 +175,7 @@ public class CancelAllocationCommand implements AllocationCommand {
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
boolean found = false;
for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.id()); it.hasNext(); ) {
for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.getId()); it.hasNext(); ) {
ShardRouting shardRouting = it.next();
if (!shardRouting.shardId().getIndex().getName().equals(index)) {
continue;


@ -155,7 +155,7 @@ public class MoveAllocationCommand implements AllocationCommand {
Decision decision = null;
boolean found = false;
for (ShardRouting shardRouting : allocation.routingNodes().node(fromDiscoNode.id())) {
for (ShardRouting shardRouting : allocation.routingNodes().node(fromDiscoNode.getId())) {
if (!shardRouting.shardId().getIndexName().equals(index)) {
continue;
}
@ -174,7 +174,7 @@ public class MoveAllocationCommand implements AllocationCommand {
", shard is not started (state = " + shardRouting.state() + "]");
}
RoutingNode toRoutingNode = allocation.routingNodes().node(toDiscoNode.id());
RoutingNode toRoutingNode = allocation.routingNodes().node(toDiscoNode.getId());
decision = allocation.deciders().canAllocate(shardRouting, toRoutingNode, allocation);
if (decision.type() == Decision.Type.NO) {
if (explain) {


@ -159,7 +159,7 @@ public class ClusterService extends AbstractLifecycleComponent<ClusterService> {
synchronized public void setLocalNode(DiscoveryNode localNode) {
assert clusterState.nodes().localNodeId() == null : "local node is already set";
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id());
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
}


@ -129,7 +129,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
// remove the NO_MASTER block in this case
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock());
return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build();
@ -155,7 +155,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
return ClusterState.builder(currentState).nodes(nodesBuilder).build();
}
@ -207,7 +207,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
final Set<String> newMembers = new HashSet<>();
for (LocalDiscovery discovery : clusterGroup.members()) {
newMembers.add(discovery.localNode().id());
newMembers.add(discovery.localNode().getId());
}
final LocalDiscovery master = firstMaster;
@ -219,7 +219,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().id());
DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().getId());
DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes());
if (delta.added()) {
logger.warn("No new nodes should be created when a new discovery view is accepted");
@ -251,7 +251,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
public String nodeDescription() {
return clusterName.value() + "/" + localNode().id();
return clusterName.value() + "/" + localNode().getId();
}
@Override
@ -312,7 +312,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
synchronized (this) {
// we do the marshaling intentionally, to check it works well...
// check if we published cluster state at least once and node was in the cluster when we published cluster state the last time
if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().id())) {
if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().getId())) {
// both conditions are true - which means we can try sending cluster state as diffs
if (clusterStateDiffBytes == null) {
Diff diff = clusterState.diff(clusterChangedEvent.previousState());


@ -246,7 +246,7 @@ public class NodeJoinController extends AbstractComponent {
throw new NotMasterException("Node [" + clusterService.localNode() + "] not master for join request");
}
DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(currentState.nodes()).masterNodeId(currentState.nodes().localNode().id());
DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(currentState.nodes()).masterNodeId(currentState.nodes().localNode().getId());
// update the fact that we are the master...
ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock()).build();
currentState = ClusterState.builder(currentState).nodes(builder).blocks(clusterBlocks).build();
@ -378,14 +378,14 @@ public class NodeJoinController extends AbstractComponent {
final DiscoveryNode node = entry.getKey();
joinCallbacksToRespondTo.addAll(entry.getValue());
iterator.remove();
if (currentState.nodes().nodeExists(node.id())) {
if (currentState.nodes().nodeExists(node.getId())) {
logger.debug("received a join request for an existing node [{}]", node);
} else {
nodeAdded = true;
nodesBuilder.put(node);
for (DiscoveryNode existingNode : currentState.nodes()) {
if (node.address().equals(existingNode.address())) {
nodesBuilder.remove(existingNode.id());
nodesBuilder.remove(existingNode.getId());
logger.warn("received join request from node [{}], but found existing node {} with same address, removing existing node", node, existingNode);
}
}


@ -283,7 +283,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
@Override
public String nodeDescription() {
return clusterName.value() + "/" + clusterService.localNode().id();
return clusterName.value() + "/" + clusterService.localNode().getId();
}
/** start of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@ -501,7 +501,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id());
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.getId());
currentState = ClusterState.builder(currentState).nodes(builder).build();
// check if we have enough master nodes, if not, we need to move into joining the cluster again
if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
@ -541,12 +541,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override
public ClusterState execute(ClusterState currentState) {
if (currentState.nodes().get(node.id()) == null) {
if (currentState.nodes().get(node.getId()) == null) {
logger.debug("node [{}] already removed from cluster state. ignoring.", node);
return currentState;
}
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes())
.remove(node.id());
.remove(node.getId());
currentState = ClusterState.builder(currentState).nodes(builder).build();
// check if we have enough master nodes, if not, we need to move into joining the cluster again
if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
@ -634,14 +634,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
@Override
public ClusterState execute(ClusterState currentState) {
if (!masterNode.id().equals(currentState.nodes().masterNodeId())) {
if (!masterNode.getId().equals(currentState.nodes().masterNodeId())) {
// master got switched on us, no need to send anything
return currentState;
}
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(currentState.nodes())
// make sure the old master node, which has failed, is not part of the nodes we publish
.remove(masterNode.id())
.remove(masterNode.getId())
.masterNodeId(null).build();
// flush any pending cluster states from old master, so it will not be set as master again


@ -154,7 +154,7 @@ public class ElectMasterService extends AbstractComponent {
if (!o1.masterNode() && o2.masterNode()) {
return 1;
}
return o1.id().compareTo(o2.id());
return o1.getId().compareTo(o2.getId());
}
}
}


@ -227,7 +227,7 @@ public class MasterFaultDetection extends FaultDetection {
threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
return;
}
final MasterPingRequest request = new MasterPingRequest(clusterService.localNode().id(), masterToPing.id(), clusterName);
final MasterPingRequest request = new MasterPingRequest(clusterService.localNode().getId(), masterToPing.getId(), clusterName);
final TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout).build();
transportService.sendRequest(masterToPing, MASTER_PING_ACTION_NAME, request, options, new BaseTransportResponseHandler<MasterPingResponseResponse>() {


@ -94,7 +94,7 @@ public class NodesFaultDetection extends FaultDetection {
public void updateNodesAndPing(ClusterState clusterState) {
// remove any nodes we don't need, this will cause their FD to stop
for (DiscoveryNode monitoredNode : nodesFD.keySet()) {
if (!clusterState.nodes().nodeExists(monitoredNode.id())) {
if (!clusterState.nodes().nodeExists(monitoredNode.getId())) {
nodesFD.remove(monitoredNode);
}
}
@ -200,7 +200,7 @@ public class NodesFaultDetection extends FaultDetection {
if (!running()) {
return;
}
final PingRequest pingRequest = new PingRequest(node.id(), clusterName, localNode, clusterStateVersion);
final PingRequest pingRequest = new PingRequest(node.getId(), clusterName, localNode, clusterStateVersion);
final TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout).build();
transportService.sendRequest(node, PING_ACTION_NAME, pingRequest, options, new BaseTransportResponseHandler<PingResponse>() {
@Override
@ -255,8 +255,8 @@ public class NodesFaultDetection extends FaultDetection {
public void messageReceived(PingRequest request, TransportChannel channel) throws Exception {
// if we are not the node we are supposed to be pinged, send an exception
// this can happen when a kill -9 is sent, and another node is started using the same port
if (!localNode.id().equals(request.nodeId)) {
throw new IllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.id() + "]");
if (!localNode.getId().equals(request.nodeId)) {
throw new IllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.getId() + "]");
}
// PingRequest will have clusterName set to null if it came from a node of version <1.4.0


@ -378,9 +378,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
// to make sure we don't disconnect a true node which was temporarily removed from the DiscoveryNodes
// but will be added again during the pinging. We therefore create a new temporary node
if (!nodeFoundByAddress) {
if (!nodeToSend.id().startsWith(UNICAST_NODE_PREFIX)) {
if (!nodeToSend.getId().startsWith(UNICAST_NODE_PREFIX)) {
DiscoveryNode tempNode = new DiscoveryNode("",
UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "_" + nodeToSend.id() + "#",
UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "_" + nodeToSend.getId() + "#",
nodeToSend.getHostName(), nodeToSend.getHostAddress(), nodeToSend.getAddress(), nodeToSend.getAttributes(),
nodeToSend.getRoles(), nodeToSend.getVersion());
@ -469,7 +469,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
try {
DiscoveryNodes discoveryNodes = contextProvider.nodes();
for (PingResponse pingResponse : response.pingResponses) {
if (pingResponse.node().id().equals(discoveryNodes.localNodeId())) {
if (pingResponse.node().getId().equals(discoveryNodes.localNodeId())) {
// that's us, ignore
continue;
}


@ -179,7 +179,7 @@ public class PublishClusterStateAction extends AbstractComponent {
// try and serialize the cluster state once (or per version), so we don't serialize it
// per node when we send it over the wire, compress it while we are at it...
// we don't send full version if node didn't exist in the previous version of cluster state
if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) {
if (sendFullVersion || !previousState.nodes().nodeExists(node.getId())) {
sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController);
} else {
sendClusterStateDiff(clusterState, serializedDiffs, serializedStates, node, publishTimeout, sendingController);
@ -210,7 +210,7 @@ public class PublishClusterStateAction extends AbstractComponent {
Diff<ClusterState> diff = null;
for (final DiscoveryNode node : nodesToPublishTo) {
try {
if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) {
if (sendFullVersion || !previousState.nodes().nodeExists(node.getId())) {
// will send a full reference
if (serializedStates.containsKey(node.version()) == false) {
serializedStates.put(node.version(), serializeFullClusterState(clusterState, node.version()));


@ -158,12 +158,12 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
changed = true;
unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
NodeGatewayStartedShards nodeShardState = nodesToAllocate.noNodeShards.get(0);
logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
changed = true;
unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else {
// we are throttling this, but we have enough to allocate to this node, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards);
@ -187,7 +187,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
DiscoveryNode node = nodeShardState.getNode();
String allocationId = nodeShardState.allocationId();
if (ignoreNodes.contains(node.id())) {
if (ignoreNodes.contains(node.getId())) {
continue;
}
@ -272,7 +272,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
List<NodeGatewayStartedShards> throttledNodeShards = new ArrayList<>();
List<NodeGatewayStartedShards> noNodeShards = new ArrayList<>();
for (NodeGatewayStartedShards nodeShardState : nodeShardStates) {
RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().id());
RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId());
if (node == null) {
continue;
}
@ -302,7 +302,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
long version = nodeShardState.legacyVersion();
DiscoveryNode node = nodeShardState.getNode();
if (ignoreNodes.contains(node.id())) {
if (ignoreNodes.contains(node.getId())) {
continue;
}


@ -164,7 +164,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
if (matchingNodes.getNodeWithHighestMatch() != null) {
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().id());
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
// we only check on THROTTLE since we checked before before on NO
Decision decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
@ -217,7 +217,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
*/
private boolean canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().dataNodes().values()) {
RoutingNode node = allocation.routingNodes().node(cursor.value.id());
RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
if (node == null) {
continue;
}
@ -259,7 +259,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
continue;
}
RoutingNode node = allocation.routingNodes().node(discoNode.id());
RoutingNode node = allocation.routingNodes().node(discoNode.getId());
if (node == null) {
continue;
}


@ -380,7 +380,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public void handleResponse(PreSyncedFlushResponse response) {
Engine.CommitId existing = commitIds.putIfAbsent(node.id(), response.commitId());
Engine.CommitId existing = commitIds.putIfAbsent(node.getId(), response.commitId());
assert existing == null : "got two answers for node [" + node + "]";
// count after the assert so we won't decrement twice in handleException
if (countDown.countDown()) {


@ -88,7 +88,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
// starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
// the index operations will not be routed to it properly
RoutingNode node = clusterService.state().getRoutingNodes().node(request.targetNode().id());
RoutingNode node = clusterService.state().getRoutingNodes().node(request.targetNode().getId());
if (node == null) {
logger.debug("delaying recovery of {} as source node {} is unknown", request.shardId(), request.targetNode());
throw new DelayRecoveryException("source node does not have the node [" + request.targetNode() + "] in its state yet..");


@ -316,7 +316,7 @@ public class RecoveryState implements ToXContent, Streamable {
restoreSource.toXContent(builder, params);
} else {
builder.startObject(Fields.SOURCE);
builder.field(Fields.ID, sourceNode.id());
builder.field(Fields.ID, sourceNode.getId());
builder.field(Fields.HOST, sourceNode.getHostName());
builder.field(Fields.TRANSPORT_ADDRESS, sourceNode.address().toString());
builder.field(Fields.IP, sourceNode.getHostAddress());
@ -325,7 +325,7 @@ public class RecoveryState implements ToXContent, Streamable {
}
builder.startObject(Fields.TARGET);
builder.field(Fields.ID, targetNode.id());
builder.field(Fields.ID, targetNode.getId());
builder.field(Fields.HOST, targetNode.getHostName());
builder.field(Fields.TRANSPORT_ADDRESS, targetNode.address().toString());
builder.field(Fields.IP, targetNode.getHostAddress());


@ -160,7 +160,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
}
// check if shard is active on the current node or is getting relocated to the our node
String localNodeId = state.getNodes().localNode().id();
String localNodeId = state.getNodes().localNode().getId();
if (localNodeId.equals(shardRouting.currentNodeId()) || localNodeId.equals(shardRouting.relocatingNodeId())) {
return false;
}


@ -85,7 +85,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
doVerify(repository, verificationToken);
} catch (Throwable t) {
logger.warn("[{}] failed to verify repository", t, repository);
errors.add(new VerificationFailure(node.id(), t));
errors.add(new VerificationFailure(node.getId(), t));
}
if (counter.decrementAndGet() == 0) {
finishVerification(listener, nodes, errors);
@ -101,7 +101,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
@Override
public void handleException(TransportException exp) {
errors.add(new VerificationFailure(node.id(), exp));
errors.add(new VerificationFailure(node.getId(), exp));
if (counter.decrementAndGet() == 0) {
finishVerification(listener, nodes, errors);
}


@ -120,7 +120,7 @@ public class RestAllocationAction extends AbstractCatAction {
for (NodeStats nodeStats : stats.getNodes()) {
DiscoveryNode node = nodeStats.getNode();
int shardCount = allocs.getOrDefault(node.id(), 0);
int shardCount = allocs.getOrDefault(node.getId(), 0);
ByteSizeValue total = nodeStats.getFs().getTotal().getTotal();
ByteSizeValue avail = nodeStats.getFs().getTotal().getAvailable();


@ -124,7 +124,7 @@ public class RestFielddataAction extends AbstractCatAction {
table.startRow();
// add the node info and field data total before each individual field
NodeStats ns = statsEntry.getKey();
table.addCell(ns.getNode().id());
table.addCell(ns.getNode().getId());
table.addCell(ns.getNode().getHostName());
table.addCell(ns.getNode().getHostAddress());
table.addCell(ns.getNode().getName());


@ -102,11 +102,11 @@ public class RestNodeAttrsAction extends AbstractCatAction {
Table table = getTableWithHeader(req);
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
table.startRow();
table.addCell(node.name());
table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().getId());
table.addCell(node.getHostName());
table.addCell(node.getHostAddress());


@ -227,8 +227,8 @@ public class RestNodesAction extends AbstractCatAction {
Table table = getTableWithHeader(req);
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
NodeStats stats = nodesStats.getNodesMap().get(node.id());
NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
NodeStats stats = nodesStats.getNodesMap().get(node.getId());
JvmInfo jvmInfo = info == null ? null : info.getJvm();
JvmStats jvmStats = stats == null ? null : stats.getJvm();
@ -239,7 +239,7 @@ public class RestNodesAction extends AbstractCatAction {
table.startRow();
table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().getId());
table.addCell(node.getHostAddress());
if (node.address() instanceof InetSocketTransportAddress) {
@ -287,7 +287,7 @@ public class RestNodesAction extends AbstractCatAction {
roles = node.getRoles().stream().map(DiscoveryNode.Role::getAbbreviation).collect(Collectors.joining());
}
table.addCell(roles);
table.addCell(masterId == null ? "x" : masterId.equals(node.id()) ? "*" : "-");
table.addCell(masterId == null ? "x" : masterId.equals(node.getId()) ? "*" : "-");
table.addCell(node.name());
CompletionStats completionStats = indicesStats == null ? null : stats.getIndices().getCompletion();


@ -94,11 +94,11 @@ public class RestPluginsAction extends AbstractCatAction {
Table table = getTableWithHeader(req);
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
for (PluginInfo pluginInfo : info.getPlugins().getPluginInfos()) {
table.startRow();
table.addCell(node.id());
table.addCell(node.getId());
table.addCell(node.name());
table.addCell(pluginInfo.getName());
table.addCell(pluginInfo.getVersion());


@ -226,11 +226,11 @@ public class RestThreadPoolAction extends AbstractCatAction {
Table table = getTableWithHeader(req);
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
NodeStats stats = nodesStats.getNodesMap().get(node.id());
NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
NodeStats stats = nodesStats.getNodesMap().get(node.getId());
table.startRow();
table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().getId());
table.addCell(node.getHostName());
table.addCell(node.getHostAddress());


@ -542,7 +542,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().getId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;


@ -207,7 +207,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
// snapshots in the future
Map<SnapshotId, Map<ShardId, IndexShardSnapshotStatus>> newSnapshots = new HashMap<>();
// Now go through all snapshots and update existing or create missing
final String localNodeId = clusterService.localNode().id();
final String localNodeId = clusterService.localNode().getId();
if (snapshotsInProgress != null) {
for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
if (entry.state() == SnapshotsInProgress.State.STARTED) {


@ -951,7 +951,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
globalLock.readLock().lock();
try {
try (Releasable ignored = connectionLock.acquire(node.id())) {
try (Releasable ignored = connectionLock.acquire(node.getId())) {
if (!lifecycle.started()) {
throw new IllegalStateException("can't add nodes to a stopped transport");
}
@ -1109,7 +1109,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
@Override
public void disconnectFromNode(DiscoveryNode node) {
try (Releasable ignored = connectionLock.acquire(node.id())) {
try (Releasable ignored = connectionLock.acquire(node.getId())) {
NodeChannels nodeChannels = connectedNodes.remove(node);
if (nodeChannels != null) {
try {
@ -1131,7 +1131,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
// check outside of the lock
NodeChannels nodeChannels = connectedNodes.get(node);
if (nodeChannels != null && nodeChannels.hasChannel(channel)) {
try (Releasable ignored = connectionLock.acquire(node.id())) {
try (Releasable ignored = connectionLock.acquire(node.getId())) {
nodeChannels = connectedNodes.get(node);
// check again within the connection lock, if its still applicable to remove it
if (nodeChannels != null && nodeChannels.hasChannel(channel)) {


@ -353,20 +353,20 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
for (DiscoveryNode discoNode : currentState.nodes()) {
String markedTribeName = discoNode.getAttributes().get(TRIBE_NAME_SETTING.getKey());
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
if (tribeState.nodes().get(discoNode.id()) == null) {
if (tribeState.nodes().get(discoNode.getId()) == null) {
clusterStateChanged = true;
logger.info("[{}] removing node [{}]", tribeName, discoNode);
nodes.remove(discoNode.id());
nodes.remove(discoNode.getId());
}
}
}
// go over tribe nodes, and see if they need to be added
for (DiscoveryNode tribe : tribeState.nodes()) {
if (currentState.nodes().get(tribe.id()) == null) {
if (currentState.nodes().get(tribe.getId()) == null) {
// a new node, add it, but also add the tribe name to the attributes
Map<String, String> tribeAttr = new HashMap<>(tribe.getAttributes());
tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName);
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(),
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.getId(), tribe.getHostName(), tribe.getHostAddress(),
tribe.address(), unmodifiableMap(tribeAttr), tribe.getRoles(), tribe.version());
clusterStateChanged = true;
logger.info("[{}] adding node [{}]", tribeName, discoNode);


@ -111,7 +111,7 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
assertThat(eitherLegacyVersionOrAllocationIdSet, equalTo(true));
assertThat(storeInfo.containsKey("allocation"), equalTo(true));
assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocationStatus().value()));
assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true));
assertThat(storeInfo.containsKey(storeStatus.getNode().getId()), equalTo(true));
if (storeStatus.getStoreException() != null) {
assertThat(storeInfo.containsKey("store_exception"), equalTo(true));
}


@ -224,14 +224,14 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
totalIndexShards += numberOfShards;
for (int j = 0; j < numberOfShards; j++) {
final ShardId shardId = new ShardId(index, "_na_", ++shardIndex);
ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED);
ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.getId(), true, ShardRoutingState.STARTED);
IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(shardId);
indexShard.addShard(shard);
indexRoutingTable.addIndexShard(indexShard.build());
}
}
discoBuilder.localNodeId(newNode(0).id());
discoBuilder.masterNodeId(newNode(numberOfNodes - 1).id());
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(numberOfNodes - 1).getId());
ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName(TEST_CLUSTER));
stateBuilder.nodes(discoBuilder);
final IndexMetaData.Builder indexMetaData = IndexMetaData.builder(index)
@ -320,7 +320,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
builder.remove(masterNode.id());
builder.remove(masterNode.getId());
setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));
@ -332,7 +332,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[]{TEST_INDEX});
Set<String> set = new HashSet<>();
for (ShardRouting shard : shardIt.asUnordered()) {
if (!shard.currentNodeId().equals(masterNode.id())) {
if (!shard.currentNodeId().equals(masterNode.getId())) {
set.add(shard.currentNodeId());
}
}
@ -405,7 +405,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
if (simulateFailedMasterNode) {
failedMasterNode = clusterService.state().nodes().masterNode();
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
builder.remove(failedMasterNode.id());
builder.remove(failedMasterNode.getId());
builder.masterNodeId(null);
setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));
@ -453,7 +453,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
}
}
if (simulateFailedMasterNode) {
totalShards += map.get(failedMasterNode.id()).size();
totalShards += map.get(failedMasterNode.getId()).size();
}
Response response = listener.get();


@ -135,8 +135,8 @@ public class TransportNodesActionTests extends ESTestCase {
discoBuilder = discoBuilder.put(node);
discoveryNodes.add(node);
}
discoBuilder.localNodeId(randomFrom(discoveryNodes).id());
discoBuilder.masterNodeId(randomFrom(discoveryNodes).id());
discoBuilder.localNodeId(randomFrom(discoveryNodes).getId());
discoBuilder.masterNodeId(randomFrom(discoveryNodes).getId());
ClusterState.Builder stateBuilder = ClusterState.builder(CLUSTER_NAME);
stateBuilder.nodes(discoBuilder);
ClusterState clusterState = stateBuilder.build();


@ -82,10 +82,10 @@ public class ClusterStateCreationUtils {
for (int i = 0; i < numberOfNodes + 1; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
unassignedNodes.add(node.id());
unassignedNodes.add(node.getId());
}
discoBuilder.localNodeId(newNode(0).id());
discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
final int primaryTerm = randomInt(200);
IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
@ -101,11 +101,11 @@ public class ClusterStateCreationUtils {
UnassignedInfo unassignedInfo = null;
if (primaryState != ShardRoutingState.UNASSIGNED) {
if (activePrimaryLocal) {
primaryNode = newNode(0).id();
primaryNode = newNode(0).getId();
unassignedNodes.remove(primaryNode);
} else {
Set<String> unassignedNodesExecludingPrimary = new HashSet<>(unassignedNodes);
unassignedNodesExecludingPrimary.remove(newNode(0).id());
unassignedNodesExecludingPrimary.remove(newNode(0).getId());
primaryNode = selectAndRemove(unassignedNodesExecludingPrimary);
}
if (primaryState == ShardRoutingState.RELOCATING) {
@ -154,8 +154,8 @@ public class ClusterStateCreationUtils {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
}
discoBuilder.localNodeId(newNode(0).id());
discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1)
@ -169,9 +169,9 @@ public class ClusterStateCreationUtils {
routing.addAsNew(indexMetaData);
final ShardId shardId = new ShardId(index, "_na_", i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true,
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, null, true,
ShardRoutingState.STARTED, null));
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false,
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).getId(), null, null, false,
ShardRoutingState.STARTED, null));
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
}
@ -221,8 +221,8 @@ public class ClusterStateCreationUtils {
public static ClusterState stateWithNoShard() {
int numberOfNodes = 2;
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
discoBuilder.localNodeId(newNode(0).id());
discoBuilder.masterNodeId(newNode(1).id());
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId());
ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
state.nodes(discoBuilder);
state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
@ -244,9 +244,9 @@ public class ClusterStateCreationUtils {
discoBuilder.put(node);
}
if (masterNode != null) {
discoBuilder.masterNodeId(masterNode.id());
discoBuilder.masterNodeId(masterNode.getId());
}
discoBuilder.localNodeId(localNode.id());
discoBuilder.localNodeId(localNode.getId());
ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
state.nodes(discoBuilder);


@ -768,7 +768,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertEquals(1, shardFailedRequests.length);
CapturingTransport.CapturedRequest shardFailedRequest = shardFailedRequests[0];
// get the shard the request was sent to
ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.id()).get(request.shardId.id());
ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.getId()).get(request.shardId.id());
// and the shard that was requested to be failed
ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry) shardFailedRequest.request;
// the shard the request was sent to and the shard to be failed should be the same


@ -72,7 +72,7 @@ public class TransportClientIT extends ESIntegTestCase {
}
for (DiscoveryNode discoveryNode : nodeService.listedNodes()) {
assertThat(discoveryNode.id(), startsWith("#transport#-"));
assertThat(discoveryNode.getId(), startsWith("#transport#-"));
assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion()));
}


@ -74,7 +74,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
emptyMap(), emptySet(), Version.CURRENT);
DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"),
emptyMap(), emptySet(), Version.CURRENT);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.id()).build();
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.getId()).build();
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode);


@ -36,9 +36,9 @@ public class ClusterStateTests extends ESTestCase {
final DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build();
ClusterState noMaster1 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(nodes).build();
ClusterState noMaster2 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(nodes).build();
ClusterState withMaster1a = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.id())).build();
ClusterState withMaster1b = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.id())).build();
ClusterState withMaster2 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.id())).build();
ClusterState withMaster1a = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())).build();
ClusterState withMaster1b = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())).build();
ClusterState withMaster2 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.getId())).build();
// states with no master should never supersede anything
assertFalse(noMaster1.supersedes(noMaster2));

View File

@ -84,7 +84,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
String excludedNodeId = null;
for (NodeInfo nodeInfo : nodesInfo) {
if (nodeInfo.getNode().isDataNode()) {
excludedNodeId = nodeInfo.getNode().id();
excludedNodeId = nodeInfo.getNode().getId();
break;
}
}
@ -102,7 +102,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
assert clusterState.nodes() != null;
if (shardRouting.unassigned() == false && clusterState.nodes().get(shardRouting.currentNodeId()).id().equals(excludedNodeId)) {
if (shardRouting.unassigned() == false && clusterState.nodes().get(shardRouting.currentNodeId()).getId().equals(excludedNodeId)) {
//if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged
//reroute happens as part of the update settings and we made sure no throttling comes into the picture via settings
assertThat(shardRouting.relocating(), equalTo(true));
@ -127,7 +127,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
String excludedNodeId = null;
for (NodeInfo nodeInfo : nodesInfo) {
if (nodeInfo.getNode().isDataNode()) {
excludedNodeId = nodeInfo.getNode().id();
excludedNodeId = nodeInfo.getNode().getId();
break;
}
}

View File

@ -375,7 +375,7 @@ public class ShardStateActionTests extends ESTestCase {
private void setUpMasterRetryVerification(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) {
shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {
DiscoveryNodes.Builder masterBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
masterBuilder.masterNodeId(clusterService.state().nodes().masterNodes().iterator().next().value.id());
masterBuilder.masterNodeId(clusterService.state().nodes().masterNodes().iterator().next().value.getId());
setState(clusterService, ClusterState.builder(clusterService.state()).nodes(masterBuilder));
});

View File

@ -22,10 +22,8 @@ package org.elasticsearch.cluster.allocation;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
@ -107,7 +105,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.setDryRun(true)
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
logger.info("--> get the state, verify nothing changed because of the dry run");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
@ -119,7 +117,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));
@ -127,7 +125,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary allocated");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));
logger.info("--> move shard 1 primary from node1 to node2");
state = client().admin().cluster().prepareReroute()
@ -135,8 +133,8 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new MoveAllocationCommand("test", 0, node_1, node_2))
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();
@ -145,7 +143,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));
}
public void testRerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception {
@ -218,7 +216,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));
@ -226,7 +224,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary allocated");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));
client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
final Index index = resolveIndex("test");
@ -253,7 +251,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));
@ -261,7 +259,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary allocated");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));
}

View File

@ -41,8 +41,8 @@ public class DiscoveryNodesTests extends ESTestCase {
DiscoveryNodes discoveryNodes = buildDiscoveryNodes();
DiscoveryNode[] nodes = discoveryNodes.nodes().values().toArray(DiscoveryNode.class);
DiscoveryNode node = randomFrom(nodes);
DiscoveryNode resolvedNode = discoveryNodes.resolveNode(randomBoolean() ? node.id() : node.name());
assertThat(resolvedNode.id(), equalTo(node.id()));
DiscoveryNode resolvedNode = discoveryNodes.resolveNode(randomBoolean() ? node.getId() : node.name());
assertThat(resolvedNode.getId(), equalTo(node.getId()));
}
public void testResolveNodeByAttribute() {
@ -52,7 +52,7 @@ public class DiscoveryNodesTests extends ESTestCase {
try {
DiscoveryNode resolvedNode = discoveryNodes.resolveNode(nodeSelector.selector);
assertThat(matchingNodeIds.size(), equalTo(1));
assertThat(resolvedNode.id(), equalTo(matchingNodeIds.iterator().next()));
assertThat(resolvedNode.getId(), equalTo(matchingNodeIds.iterator().next()));
} catch(IllegalArgumentException e) {
if (matchingNodeIds.size() == 0) {
assertThat(e.getMessage(), equalTo("failed to resolve [" + nodeSelector.selector + "], no matching nodes"));
@ -88,7 +88,7 @@ public class DiscoveryNodesTests extends ESTestCase {
for (int i = 0; i < numNodeNames; i++) {
DiscoveryNode discoveryNode = randomFrom(nodes);
nodeSelectors.add(discoveryNode.name());
expectedNodeIdsSet.add(discoveryNode.id());
expectedNodeIdsSet.add(discoveryNode.getId());
}
String[] resolvedNodesIds = discoveryNodes.resolveNodesIds(nodeSelectors.toArray(new String[nodeSelectors.size()]));
@ -111,8 +111,8 @@ public class DiscoveryNodesTests extends ESTestCase {
discoBuilder = discoBuilder.put(node);
nodesList.add(node);
}
discoBuilder.localNodeId(randomFrom(nodesList).id());
discoBuilder.masterNodeId(randomFrom(nodesList).id());
discoBuilder.localNodeId(randomFrom(nodesList).getId());
discoBuilder.masterNodeId(randomFrom(nodesList).getId());
return discoBuilder.build();
}
@ -158,7 +158,7 @@ public class DiscoveryNodesTests extends ESTestCase {
Set<String> ids = new HashSet<>();
nodes.getNodes().valuesIt().forEachRemaining(node -> {
if ("value".equals(node.getAttributes().get("attr"))) {
ids.add(node.id());
ids.add(node.getId());
}
});
return ids;
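Side note, not part of this commit: the attribute-filtering loop above collects the ids of every node whose "attr" attribute equals "value". The same selection can be written as a stream pipeline; the sketch below is illustrative only, and Node is a hypothetical stand-in for DiscoveryNode, which carries far more state in the real class.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class MatchingIdsSketch {

    // Hypothetical stand-in for DiscoveryNode: just an id and an attribute map.
    static class Node {
        final String id;
        final Map<String, String> attributes;

        Node(String id, Map<String, String> attributes) {
            this.id = id;
            this.attributes = attributes;
        }
    }

    // Collects the ids of all nodes whose "attr" attribute equals "value",
    // mirroring the forEachRemaining loop in DiscoveryNodesTests.
    static Set<String> matchingIds(List<Node> nodes) {
        return nodes.stream()
                .filter(node -> "value".equals(node.attributes.get("attr")))
                .map(node -> node.id)
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        Map<String, String> match = new HashMap<>();
        match.put("attr", "value");
        Map<String, String> noMatch = new HashMap<>();
        noMatch.put("attr", "other");
        List<Node> nodes = Arrays.asList(new Node("node_1", match), new Node("node_2", noMatch));
        System.out.println(matchingIds(nodes)); // prints [node_1]
    }
}

Both forms produce the same set; the choice between the loop and the stream is purely stylistic.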

View File

@ -389,7 +389,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
ArrayList<DiscoveryNode> discoveryNodes = CollectionUtils.iterableAsArrayList(clusterState.nodes());
Collections.shuffle(discoveryNodes, random());
for (DiscoveryNode node : discoveryNodes) {
nodes.remove(node.id());
nodes.remove(node.getId());
numNodes--;
if (numNodes <= 0) {
break;

View File

@ -319,14 +319,14 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
RoutingTable routingTable = RoutingTable.builder()
.add(IndexRoutingTable.builder(shard1.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shard1)
.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.id(), true, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.id(), false, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.getId(), true, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.getId(), false, ShardRoutingState.STARTED))
.build())
)
.add(IndexRoutingTable.builder(shard2.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shard2)
.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.id(), true, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.id(), false, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.getId(), true, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.getId(), false, ShardRoutingState.STARTED))
.build())
)
.build();

View File

@ -122,8 +122,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
builder.put(nodeA);
builder.put(nodeB);
builder.localNodeId(nodeA.id());
builder.masterNodeId(master ? nodeA.id() : nodeB.id());
builder.localNodeId(nodeA.getId());
builder.masterNodeId(master ? nodeA.getId() : nodeB.getId());
return builder.build();
}
@ -131,8 +131,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
builder.put(nodeA);
builder.put(nodeB);
builder.localNodeId(nodeB.id());
builder.masterNodeId(master ? nodeB.id() : nodeA.id());
builder.localNodeId(nodeB.getId());
builder.masterNodeId(master ? nodeB.getId() : nodeA.getId());
return builder.build();
}

View File

@ -68,7 +68,7 @@ public class ElectMasterServiceTests extends ESTestCase {
if (!prevNode.masterNode()) {
assertFalse(node.masterNode());
} else if (node.masterNode()) {
assertTrue(prevNode.id().compareTo(node.id()) < 0);
assertTrue(prevNode.getId().compareTo(node.getId()) < 0);
}
prevNode = node;
}
@ -100,7 +100,7 @@ public class ElectMasterServiceTests extends ESTestCase {
assertNotNull(master);
for (DiscoveryNode node : nodes) {
if (node.masterNode()) {
assertTrue(master.id().compareTo(node.id()) <= 0);
assertTrue(master.getId().compareTo(node.getId()) <= 0);
}
}
}

View File

@ -97,7 +97,7 @@ public class NodeJoinControllerTests extends ESTestCase {
final DiscoveryNode localNode = initialNodes.localNode();
// make sure we have a master
setState(clusterService, ClusterState.builder(clusterService.state()).nodes(
DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id())));
DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId())));
nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY),
new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
Settings.EMPTY);
@ -555,7 +555,7 @@ public class NodeJoinControllerTests extends ESTestCase {
logger.info("assert for [{}] in:\n{}", expectedNodes, state.prettyPrint());
DiscoveryNodes discoveryNodes = state.nodes();
for (DiscoveryNode node : expectedNodes) {
assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.id()), equalTo(node));
assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.getId()), equalTo(node));
}
assertThat(discoveryNodes.size(), equalTo(expectedNodes.size()));
}

View File

@ -72,7 +72,7 @@ public class ZenPingTests extends ESTestCase {
ZenPing.PingResponse[] aggregate = collection.toArray();
for (ZenPing.PingResponse ping : aggregate) {
int nodeId = Integer.parseInt(ping.node().id());
int nodeId = Integer.parseInt(ping.node().getId());
assertThat(maxIdPerNode[nodeId], equalTo(ping.id()));
assertThat(masterPerNode[nodeId], equalTo(ping.master()));
assertThat(hasJoinedOncePerNode[nodeId], equalTo(ping.hasJoinedOnce()));

View File

@ -110,14 +110,14 @@ public class UnicastZenPingIT extends ESTestCase {
logger.info("ping from UZP_A");
ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(10));
assertThat(pingResponses.length, equalTo(1));
assertThat(pingResponses[0].node().id(), equalTo("UZP_B"));
assertThat(pingResponses[0].node().getId(), equalTo("UZP_B"));
assertTrue(pingResponses[0].hasJoinedOnce());
// ping again, this time from B,
logger.info("ping from UZP_B");
pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(10));
assertThat(pingResponses.length, equalTo(1));
assertThat(pingResponses[0].node().id(), equalTo("UZP_A"));
assertThat(pingResponses[0].node().getId(), equalTo("UZP_A"));
assertFalse(pingResponses[0].hasJoinedOnce());
} finally {

View File

@ -97,11 +97,11 @@ public class PublishClusterStateActionTests extends ESTestCase {
this.service = service;
this.listener = listener;
this.logger = logger;
this.clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.id()).build()).build();
this.clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.getId()).build()).build();
}
public MockNode setAsMaster() {
this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.id())).build();
this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.getId())).build();
return this;
}
@ -306,8 +306,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
.put(nodeA.discoveryNode)
.put(nodeB.discoveryNode)
.put(nodeC.discoveryNode)
.masterNodeId(nodeB.discoveryNode.id())
.localNodeId(nodeB.discoveryNode.id())
.masterNodeId(nodeB.discoveryNode.getId())
.localNodeId(nodeB.discoveryNode.getId())
.build();
previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
@ -358,7 +358,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
});
// Initial cluster state
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).masterNodeId(nodeA.discoveryNode.id()).build();
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodes).build();
// cluster state update - add nodeB
@ -485,7 +485,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
for (int i = 0; i < dataNodes; i++) {
discoveryNodesBuilder.put(createMockNode("data_" + i, dataSettings).discoveryNode);
}
discoveryNodesBuilder.localNodeId(master.discoveryNode.id()).masterNodeId(master.discoveryNode.id());
discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
MetaData metaData = MetaData.EMPTY_META_DATA;
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).nodes(discoveryNodes).build();
@ -562,7 +562,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
logger.info("--> expecting commit to {}. good nodes [{}], errors [{}], timeouts [{}]. min_master_nodes [{}]",
expectedBehavior, goodNodes + 1, errorNodes, timeOutNodes, minMasterNodes);
discoveryNodesBuilder.localNodeId(master.discoveryNode.id()).masterNodeId(master.discoveryNode.id());
discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
MetaData metaData = MetaData.EMPTY_META_DATA;
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).nodes(discoveryNodes).build();
@ -623,7 +623,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
try {
MockNode otherNode = createMockNode("otherNode");
state = ClusterState.builder(node.clusterState).nodes(
DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.id()).build()
DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build()
).incrementVersion().build();
node.action.validateIncomingState(state, node.clusterState);
fail("node accepted state with existent but wrong local node");
@ -696,7 +696,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
MockNode master = createMockNode("master", settings);
MockNode node = createMockNode("node", settings);
ClusterState state = ClusterState.builder(master.clusterState)
.nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.id())).build();
.nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build();
for (int i = 0; i < 10; i++) {
state = ClusterState.builder(state).incrementVersion().build();

View File

@ -135,7 +135,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));
}
/**
@ -173,7 +173,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));
if (useAllocationIds) {
// check that allocation id is reused
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("allocId1"));
@ -195,7 +195,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
DiscoveryNode allocatedNode = node1HasPrimaryShard ? node1 : node2;
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(allocatedNode.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(allocatedNode.getId()));
}
/**
@ -234,7 +234,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));
}
/**
@ -247,7 +247,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
}
/**
@ -265,7 +265,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("some allocId"));
}
@ -466,7 +466,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), anyOf(equalTo(node2.id()), equalTo(node1.id())));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), anyOf(equalTo(node2.getId()), equalTo(node1.getId())));
}
/**
@ -507,7 +507,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
}
private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, boolean asNew, Version version, String... activeAllocationIds) {

View File

@ -122,7 +122,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.addData(nodeToMatch, false, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
}
/**
@ -135,7 +135,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.addData(nodeToMatch, false, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM"));
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
}
/**
@ -148,7 +148,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.addData(nodeToMatch, false, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
}
/**
@ -246,7 +246,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
}
public void testCancelRecoveryBetterSyncId() {
@ -284,7 +284,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
}
private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) {
ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED);
ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.getId(), true, ShardRoutingState.STARTED);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT).put(settings))
.numberOfShards(1).numberOfReplicas(1)
@ -306,7 +306,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
}
private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {
ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED);
ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.getId(), true, ShardRoutingState.STARTED);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT))
.numberOfShards(1).numberOfReplicas(1)
@ -316,7 +316,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.add(IndexRoutingTable.builder(shardId.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shardId)
.addShard(primaryShard)
.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.id(), null, null, false, ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.getId(), null, null, false, ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
.build())
)
.build();

View File

@ -561,7 +561,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
ensureSearchable(indexName);
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
final String blueNodeId = internalCluster().getInstance(ClusterService.class, blueNodeName).localNode().id();
final String blueNodeId = internalCluster().getInstance(ClusterService.class, blueNodeName).localNode().getId();
assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty());

View File

@ -381,8 +381,8 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
final String masterNode = internalCluster().getMasterName();
final String nonMasterNode = nodes.get(0).equals(masterNode) ? nodes.get(1) : nodes.get(0);
final String masterId = internalCluster().clusterService(masterNode).localNode().id();
final String nonMasterId = internalCluster().clusterService(nonMasterNode).localNode().id();
final String masterId = internalCluster().clusterService(masterNode).localNode().getId();
final String nonMasterId = internalCluster().clusterService(nonMasterNode).localNode().getId();
final int numShards = scaledRandomIntBetween(2, 10);
assertAcked(prepareCreate("test")

View File

@ -140,7 +140,7 @@ public class IndicesStoreTests extends ESTestCase {
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz",
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz",
new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT)));
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1));
int localShardId = randomInt(numShards - 1);
@ -163,7 +163,7 @@ public class IndicesStoreTests extends ESTestCase {
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode));
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode));
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1));
for (int i = 0; i < numShards; i++) {
String relocatingNodeId = randomBoolean() ? null : "def";
@ -185,7 +185,7 @@ public class IndicesStoreTests extends ESTestCase {
final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random());
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz",
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz",
new LocalTransportAddress("xyz"), emptyMap(), emptySet(), nodeVersion)));
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1));
for (int i = 0; i < numShards; i++) {
@ -207,7 +207,7 @@ public class IndicesStoreTests extends ESTestCase {
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random());
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id())
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId())
.put(localNode)
.put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT))
.put(new DiscoveryNode("def", new LocalTransportAddress("def"), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test

View File

@ -93,10 +93,10 @@ public class CapturingTransport implements Transport {
public Map<String, List<CapturedRequest>> capturedRequestsByTargetNode() {
Map<String, List<CapturedRequest>> map = new HashMap<>();
for (CapturedRequest request : capturedRequests) {
List<CapturedRequest> nodeList = map.get(request.node.id());
List<CapturedRequest> nodeList = map.get(request.node.getId());
if (nodeList == null) {
nodeList = new ArrayList<>();
map.put(request.node.id(), nodeList);
map.put(request.node.getId(), nodeList);
}
nodeList.add(request);
}
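A closing aside on the grouping pattern in capturedRequestsByTargetNode() above, again not part of this commit: the manual get/put around the per-node list is the pre-Java-8 idiom, and Map.computeIfAbsent collapses it into a single call. The sketch below uses a hypothetical CapturedRequest class in place of the real CapturingTransport types, keeping only the node id that the grouping actually depends on.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupByNodeIdSketch {

    // Hypothetical stand-in for CapturingTransport.CapturedRequest: only the target node id matters here.
    static class CapturedRequest {
        final String nodeId;
        final String action;

        CapturedRequest(String nodeId, String action) {
            this.nodeId = nodeId;
            this.action = action;
        }

        @Override
        public String toString() {
            return action + "->" + nodeId;
        }
    }

    // Same grouping as capturedRequestsByTargetNode(), with computeIfAbsent replacing the manual get/put.
    static Map<String, List<CapturedRequest>> byTargetNode(List<CapturedRequest> requests) {
        Map<String, List<CapturedRequest>> map = new HashMap<>();
        for (CapturedRequest request : requests) {
            map.computeIfAbsent(request.nodeId, id -> new ArrayList<>()).add(request);
        }
        return map;
    }

    public static void main(String[] args) {
        List<CapturedRequest> requests = Arrays.asList(
                new CapturedRequest("node_1", "indices:data/write/bulk"),
                new CapturedRequest("node_2", "indices:data/read/search"),
                new CapturedRequest("node_1", "cluster:monitor/nodes/stats"));
        System.out.println(byTargetNode(requests)); // the two node_1 requests end up in the same list
    }
}

Behaviour is unchanged either way; computeIfAbsent only removes the null check and the explicit put.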