Remove DiscoveryNode#id in favour of existing DiscoveryNode#getId
parent eed885eeab
commit 9889f10e5e
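
The change is mechanical: DiscoveryNode#getId now returns the node id field directly instead of delegating to the removed DiscoveryNode#id, and call sites switch accordingly. A minimal sketch of the caller-side migration (mirroring the localNode() call sites in the hunks below):

    // before this commit
    String nodeId = clusterService.localNode().id();

    // after this commit
    String nodeId = clusterService.localNode().getId();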
@@ -68,7 +68,7 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To

  builder.startObject("nodes");
  for (NodeInfo nodeInfo : this) {
- builder.startObject(nodeInfo.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(nodeInfo.getNode().getId(), XContentBuilder.FieldCaseConversion.NONE);

  builder.field("name", nodeInfo.getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
  builder.field("transport_address", nodeInfo.getNode().address().toString());

@@ -65,7 +65,7 @@ public class NodesStatsResponse extends BaseNodesResponse<NodeStats> implements

  builder.startObject("nodes");
  for (NodeStats nodeStats : this) {
- builder.startObject(nodeStats.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(nodeStats.getNode().getId(), XContentBuilder.FieldCaseConversion.NONE);
  builder.field("timestamp", nodeStats.getTimestamp());
  nodeStats.toXContent(builder, params);

@@ -86,7 +86,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
  public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
  builder.startObject(Fields.NODES);
  for (DiscoveryNode node : nodes) {
- builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
  builder.field(Fields.NAME, node.name(), XContentBuilder.FieldCaseConversion.NONE);
  builder.endObject();
  }

@@ -107,7 +107,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
  protected NodeSnapshotStatus nodeOperation(NodeRequest request) {
  Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
  try {
- String nodeId = clusterService.localNode().id();
+ String nodeId = clusterService.localNode().getId();
  for (SnapshotId snapshotId : request.snapshotIds) {
  Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshotId);
  if (shardsStatus == null) {

@@ -194,7 +194,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
  }

  private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
- for (ShardRouting shardRouting : routingNodes.node(node.id())) {
+ for (ShardRouting shardRouting : routingNodes.node(node.getId())) {
  ShardId shardId = shardRouting.shardId();
  if (shardId.id() == shardID && shardId.getIndexName().equals(index)) {
  if (shardRouting.primary()) {

@@ -155,7 +155,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>

  @Override
  public void onFailure(Throwable t) {
- onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
+ onFirstPhaseResult(shardIndex, shard, node.getId(), shardIt, t);
  }
  });
  }

@@ -299,7 +299,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
  try {
  NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
  if (task != null) {
- nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
+ nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
  taskManager.registerChildTask(task, node.getId());
  }
  transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler<NodeResponse>() {

@@ -330,7 +330,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe

  protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
  if (logger.isTraceEnabled()) {
- logger.trace("received response for [{}] from node [{}]", actionName, node.id());
+ logger.trace("received response for [{}] from node [{}]", actionName, node.getId());
  }

  // this is defensive to protect against the possibility of double invocation

@@ -344,7 +344,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
  }

  protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
- String nodeId = node.id();
+ String nodeId = node.getId();
  if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
  logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
  }

@@ -81,7 +81,7 @@ public abstract class BaseNodesResponse<TNodeResponse extends BaseNodeResponse>
  if (nodesMap == null) {
  nodesMap = new HashMap<>();
  for (TNodeResponse nodeResponse : nodes) {
- nodesMap.put(nodeResponse.getNode().id(), nodeResponse);
+ nodesMap.put(nodeResponse.getNode().getId(), nodeResponse);
  }
  }
  return nodesMap;

@@ -161,7 +161,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
  } else {
  ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
  if (task != null) {
- nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
+ nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
  taskManager.registerChildTask(task, node.getId());
  }

@@ -178,7 +178,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest

  @Override
  public void handleException(TransportException exp) {
- onFailure(idx, node.id(), exp);
+ onFailure(idx, node.getId(), exp);
  }

  @Override

@@ -552,7 +552,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
  final Throwable cause = exp.unwrapCause();
  if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
  (isPrimaryAction && retryPrimaryException(cause))) {
- logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.id(), request);
+ logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(), request);
  retry(exp);
  } else {
  finishAsFailed(exp);

@@ -113,10 +113,10 @@ public abstract class TransportTasksAction<
  results.add(response);
  }
  } catch (Exception ex) {
- exceptions.add(new TaskOperationFailure(clusterService.localNode().id(), task.getId(), ex));
+ exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), task.getId(), ex));
  }
  });
- return new NodeTasksResponse(clusterService.localNode().id(), results, exceptions);
+ return new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions);
  }

  protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {

@@ -237,7 +237,7 @@ public abstract class TransportTasksAction<
  onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
  } else {
  NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
- nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
+ nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
  taskManager.registerChildTask(task, node.getId());
  transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
  new BaseTransportResponseHandler<NodeTasksResponse>() {

@@ -253,7 +253,7 @@ public abstract class TransportTasksAction<

  @Override
  public void handleException(TransportException exp) {
- onFailure(idx, node.id(), exp);
+ onFailure(idx, node.getId(), exp);
  }

  @Override

@@ -383,7 +383,7 @@ public class TransportClientNodesService extends AbstractComponent {
  // use discovered information but do keep the original transport address,
  // so people can control which address is exactly used.
  DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
- newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(),
+ newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.getId(), nodeWithInfo.getHostName(),
  nodeWithInfo.getHostAddress(), listedNode.getAddress(), nodeWithInfo.getAttributes(),
  nodeWithInfo.getRoles(), nodeWithInfo.getVersion()));
  } else {

@@ -409,7 +409,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
  mostAvailablePath = info;
  }
  }
- String nodeId = nodeStats.getNode().id();
+ String nodeId = nodeStats.getNode().getId();
  String nodeName = nodeStats.getNode().getName();
  if (logger.isTraceEnabled()) {
  logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",

@@ -221,18 +221,11 @@ public class DiscoveryNode implements Writeable<DiscoveryNode>, ToXContent {
  return address();
  }

- /**
- * The unique id of the node.
- */
- public String id() {
- return nodeId;
- }
-
  /**
  * The unique id of the node.
  */
  public String getId() {
- return id();
+ return nodeId;
  }

  /**

@@ -377,7 +370,7 @@ public class DiscoveryNode implements Writeable<DiscoveryNode>, ToXContent {

  @Override
  public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject(id(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(getId(), XContentBuilder.FieldCaseConversion.NONE);
  builder.field("name", name());
  builder.field("transport_address", address().toString());

@@ -155,7 +155,7 @@ public class DiscoveryNodeFilters {
  }
  } else if ("_id".equals(attr)) {
  for (String value : values) {
- if (node.id().equals(value)) {
+ if (node.getId().equals(value)) {
  if (opType == OpType.OR) {
  return true;
  }

@@ -340,7 +340,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  int index = 0;
  nodesIds = new String[nodes.size()];
  for (DiscoveryNode node : this) {
- nodesIds[index++] = node.id();
+ nodesIds[index++] = node.getId();
  }
  return nodesIds;
  } else {

@@ -362,14 +362,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  // not a node id, try and search by name
  for (DiscoveryNode node : this) {
  if (Regex.simpleMatch(nodeId, node.name())) {
- resolvedNodesIds.add(node.id());
+ resolvedNodesIds.add(node.getId());
  }
  }
  for (DiscoveryNode node : this) {
  if (Regex.simpleMatch(nodeId, node.getHostAddress())) {
- resolvedNodesIds.add(node.id());
+ resolvedNodesIds.add(node.getId());
  } else if (Regex.simpleMatch(nodeId, node.getHostName())) {
- resolvedNodesIds.add(node.id());
+ resolvedNodesIds.add(node.getId());
  }
  }
  int index = nodeId.indexOf(':');

@@ -400,7 +400,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  String attrName = entry.getKey();
  String attrValue = entry.getValue();
  if (Regex.simpleMatch(matchAttrName, attrName) && Regex.simpleMatch(matchAttrValue, attrValue)) {
- resolvedNodesIds.add(node.id());
+ resolvedNodesIds.add(node.getId());
  }
  }
  }

@@ -415,7 +415,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  public DiscoveryNodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
  Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
  for (DiscoveryNode node : this) {
- if (newNodes.contains(node.id())) {
+ if (newNodes.contains(node.getId())) {
  builder.put(node);
  }
  }

@@ -433,12 +433,12 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  List<DiscoveryNode> removed = new ArrayList<>();
  List<DiscoveryNode> added = new ArrayList<>();
  for (DiscoveryNode node : other) {
- if (!this.nodeExists(node.id())) {
+ if (!this.nodeExists(node.getId())) {
  removed.add(node);
  }
  }
  for (DiscoveryNode node : this) {
- if (!other.nodeExists(node.id())) {
+ if (!other.nodeExists(node.getId())) {
  added.add(node);
  }
  }

@@ -539,7 +539,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  public String shortSummary() {
  StringBuilder sb = new StringBuilder();
  if (!removed() && masterNodeChanged()) {
- if (newMasterNode.id().equals(localNodeId)) {
+ if (newMasterNode.getId().equals(localNodeId)) {
  // we are the master, no nodes we removed, we are actually the first master
  sb.append("new_master ").append(newMasterNode());
  } else {

@@ -567,13 +567,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  }
  if (added()) {
  // don't print if there is one added, and it is us
- if (!(addedNodes().size() == 1 && addedNodes().get(0).id().equals(localNodeId))) {
+ if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) {
  if (removed() || masterNodeChanged()) {
  sb.append(", ");
  }
  sb.append("added {");
  for (DiscoveryNode node : addedNodes()) {
- if (!node.id().equals(localNodeId)) {
+ if (!node.getId().equals(localNodeId)) {
  // don't print ourself
  sb.append(node).append(',');
  }

@@ -605,12 +605,12 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  builder.masterNodeId(in.readString());
  }
  if (localNode != null) {
- builder.localNodeId(localNode.id());
+ builder.localNodeId(localNode.getId());
  }
  int size = in.readVInt();
  for (int i = 0; i < size; i++) {
  DiscoveryNode node = new DiscoveryNode(in);
- if (localNode != null && node.id().equals(localNode.id())) {
+ if (localNode != null && node.getId().equals(localNode.getId())) {
  // reuse the same instance of our address and local node id for faster equality
  node = localNode;
  }

@@ -649,7 +649,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
  }

  public Builder put(DiscoveryNode node) {
- nodes.put(node.id(), node);
+ nodes.put(node.getId(), node);
  return this;
  }

@@ -87,7 +87,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
  Map<String, List<ShardRouting>> nodesToShards = new HashMap<>();
  // fill in the nodeToShards with the "live" nodes
  for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
- nodesToShards.put(cursor.value.id(), new ArrayList<>());
+ nodesToShards.put(cursor.value.getId(), new ArrayList<>());
  }

  // fill in the inverse of node -> shards allocated

@@ -570,7 +570,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {

  public void addNode(DiscoveryNode node) {
  ensureMutable();
- RoutingNode routingNode = new RoutingNode(node.id(), node);
+ RoutingNode routingNode = new RoutingNode(node.getId(), node);
  nodesToShards.put(routingNode.nodeId(), routingNode);
  }

@@ -91,7 +91,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
  return explainOrThrowRejectedCommand(explain, allocation, e);
  }
  final RoutingNodes routingNodes = allocation.routingNodes();
- RoutingNode routingNode = routingNodes.node(discoNode.id());
+ RoutingNode routingNode = routingNodes.node(discoNode.getId());
  if (routingNode == null) {
  return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
  }

@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision;
  import org.elasticsearch.common.xcontent.ObjectParser;
  import org.elasticsearch.common.xcontent.XContentParser;
  import org.elasticsearch.index.IndexNotFoundException;
- import org.elasticsearch.index.shard.ShardId;
  import org.elasticsearch.index.shard.ShardNotFoundException;

  import java.io.IOException;

@@ -90,7 +89,7 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation
  return explainOrThrowRejectedCommand(explain, allocation, e);
  }
  final RoutingNodes routingNodes = allocation.routingNodes();
- RoutingNode routingNode = routingNodes.node(discoNode.id());
+ RoutingNode routingNode = routingNodes.node(discoNode.getId());
  if (routingNode == null) {
  return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
  }

@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision;
  import org.elasticsearch.common.xcontent.ObjectParser;
  import org.elasticsearch.common.xcontent.XContentParser;
  import org.elasticsearch.index.IndexNotFoundException;
- import org.elasticsearch.index.shard.ShardId;
  import org.elasticsearch.index.shard.ShardNotFoundException;

  import java.io.IOException;

@@ -92,7 +91,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
  return explainOrThrowRejectedCommand(explain, allocation, e);
  }
  final RoutingNodes routingNodes = allocation.routingNodes();
- RoutingNode routingNode = routingNodes.node(discoNode.id());
+ RoutingNode routingNode = routingNodes.node(discoNode.getId());
  if (routingNode == null) {
  return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
  }

@@ -175,7 +175,7 @@ public class CancelAllocationCommand implements AllocationCommand {
  public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
  DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
  boolean found = false;
- for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.id()); it.hasNext(); ) {
+ for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.getId()); it.hasNext(); ) {
  ShardRouting shardRouting = it.next();
  if (!shardRouting.shardId().getIndex().getName().equals(index)) {
  continue;

@@ -155,7 +155,7 @@ public class MoveAllocationCommand implements AllocationCommand {
  Decision decision = null;

  boolean found = false;
- for (ShardRouting shardRouting : allocation.routingNodes().node(fromDiscoNode.id())) {
+ for (ShardRouting shardRouting : allocation.routingNodes().node(fromDiscoNode.getId())) {
  if (!shardRouting.shardId().getIndexName().equals(index)) {
  continue;
  }

@@ -174,7 +174,7 @@ public class MoveAllocationCommand implements AllocationCommand {
  ", shard is not started (state = " + shardRouting.state() + "]");
  }

- RoutingNode toRoutingNode = allocation.routingNodes().node(toDiscoNode.id());
+ RoutingNode toRoutingNode = allocation.routingNodes().node(toDiscoNode.getId());
  decision = allocation.deciders().canAllocate(shardRouting, toRoutingNode, allocation);
  if (decision.type() == Decision.Type.NO) {
  if (explain) {

@@ -159,7 +159,7 @@ public class ClusterService extends AbstractLifecycleComponent<ClusterService> {

  synchronized public void setLocalNode(DiscoveryNode localNode) {
  assert clusterState.nodes().localNodeId() == null : "local node is already set";
- DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id());
+ DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId());
  this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
  }

@@ -129,7 +129,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
  for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
  nodesBuilder.put(discovery.localNode());
  }
- nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
+ nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
  // remove the NO_MASTER block in this case
  ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock());
  return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build();

@@ -155,7 +155,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
  for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
  nodesBuilder.put(discovery.localNode());
  }
- nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
+ nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
  return ClusterState.builder(currentState).nodes(nodesBuilder).build();
  }

@@ -207,7 +207,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem

  final Set<String> newMembers = new HashSet<>();
  for (LocalDiscovery discovery : clusterGroup.members()) {
- newMembers.add(discovery.localNode().id());
+ newMembers.add(discovery.localNode().getId());
  }

  final LocalDiscovery master = firstMaster;

@@ -219,7 +219,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem

  @Override
  public ClusterState execute(ClusterState currentState) {
- DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().id());
+ DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().getId());
  DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes());
  if (delta.added()) {
  logger.warn("No new nodes should be created when a new discovery view is accepted");

@@ -251,7 +251,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem

  @Override
  public String nodeDescription() {
- return clusterName.value() + "/" + localNode().id();
+ return clusterName.value() + "/" + localNode().getId();
  }

  @Override

@@ -312,7 +312,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
  synchronized (this) {
  // we do the marshaling intentionally, to check it works well...
  // check if we published cluster state at least once and node was in the cluster when we published cluster state the last time
- if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().id())) {
+ if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().getId())) {
  // both conditions are true - which means we can try sending cluster state as diffs
  if (clusterStateDiffBytes == null) {
  Diff diff = clusterState.diff(clusterChangedEvent.previousState());

@@ -246,7 +246,7 @@ public class NodeJoinController extends AbstractComponent {
  throw new NotMasterException("Node [" + clusterService.localNode() + "] not master for join request");
  }

- DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(currentState.nodes()).masterNodeId(currentState.nodes().localNode().id());
+ DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(currentState.nodes()).masterNodeId(currentState.nodes().localNode().getId());
  // update the fact that we are the master...
  ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock()).build();
  currentState = ClusterState.builder(currentState).nodes(builder).blocks(clusterBlocks).build();

@@ -378,14 +378,14 @@ public class NodeJoinController extends AbstractComponent {
  final DiscoveryNode node = entry.getKey();
  joinCallbacksToRespondTo.addAll(entry.getValue());
  iterator.remove();
- if (currentState.nodes().nodeExists(node.id())) {
+ if (currentState.nodes().nodeExists(node.getId())) {
  logger.debug("received a join request for an existing node [{}]", node);
  } else {
  nodeAdded = true;
  nodesBuilder.put(node);
  for (DiscoveryNode existingNode : currentState.nodes()) {
  if (node.address().equals(existingNode.address())) {
- nodesBuilder.remove(existingNode.id());
+ nodesBuilder.remove(existingNode.getId());
  logger.warn("received join request from node [{}], but found existing node {} with same address, removing existing node", node, existingNode);
  }
  }

@@ -283,7 +283,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen

  @Override
  public String nodeDescription() {
- return clusterName.value() + "/" + clusterService.localNode().id();
+ return clusterName.value() + "/" + clusterService.localNode().getId();
  }

  /** start of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */

@@ -501,7 +501,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
  clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
  @Override
  public ClusterState execute(ClusterState currentState) {
- DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id());
+ DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.getId());
  currentState = ClusterState.builder(currentState).nodes(builder).build();
  // check if we have enough master nodes, if not, we need to move into joining the cluster again
  if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {

@@ -541,12 +541,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
  clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, new ClusterStateUpdateTask(Priority.IMMEDIATE) {
  @Override
  public ClusterState execute(ClusterState currentState) {
- if (currentState.nodes().get(node.id()) == null) {
+ if (currentState.nodes().get(node.getId()) == null) {
  logger.debug("node [{}] already removed from cluster state. ignoring.", node);
  return currentState;
  }
  DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes())
- .remove(node.id());
+ .remove(node.getId());
  currentState = ClusterState.builder(currentState).nodes(builder).build();
  // check if we have enough master nodes, if not, we need to move into joining the cluster again
  if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {

@@ -634,14 +634,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen

  @Override
  public ClusterState execute(ClusterState currentState) {
- if (!masterNode.id().equals(currentState.nodes().masterNodeId())) {
+ if (!masterNode.getId().equals(currentState.nodes().masterNodeId())) {
  // master got switched on us, no need to send anything
  return currentState;
  }

  DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(currentState.nodes())
  // make sure the old master node, which has failed, is not part of the nodes we publish
- .remove(masterNode.id())
+ .remove(masterNode.getId())
  .masterNodeId(null).build();

  // flush any pending cluster states from old master, so it will not be set as master again

@@ -154,7 +154,7 @@ public class ElectMasterService extends AbstractComponent {
  if (!o1.masterNode() && o2.masterNode()) {
  return 1;
  }
- return o1.id().compareTo(o2.id());
+ return o1.getId().compareTo(o2.getId());
  }
  }
  }

@@ -227,7 +227,7 @@ public class MasterFaultDetection extends FaultDetection {
  threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
  return;
  }
- final MasterPingRequest request = new MasterPingRequest(clusterService.localNode().id(), masterToPing.id(), clusterName);
+ final MasterPingRequest request = new MasterPingRequest(clusterService.localNode().getId(), masterToPing.getId(), clusterName);
  final TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout).build();
  transportService.sendRequest(masterToPing, MASTER_PING_ACTION_NAME, request, options, new BaseTransportResponseHandler<MasterPingResponseResponse>() {

@@ -94,7 +94,7 @@ public class NodesFaultDetection extends FaultDetection {
  public void updateNodesAndPing(ClusterState clusterState) {
  // remove any nodes we don't need, this will cause their FD to stop
  for (DiscoveryNode monitoredNode : nodesFD.keySet()) {
- if (!clusterState.nodes().nodeExists(monitoredNode.id())) {
+ if (!clusterState.nodes().nodeExists(monitoredNode.getId())) {
  nodesFD.remove(monitoredNode);
  }
  }

@@ -200,7 +200,7 @@ public class NodesFaultDetection extends FaultDetection {
  if (!running()) {
  return;
  }
- final PingRequest pingRequest = new PingRequest(node.id(), clusterName, localNode, clusterStateVersion);
+ final PingRequest pingRequest = new PingRequest(node.getId(), clusterName, localNode, clusterStateVersion);
  final TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout).build();
  transportService.sendRequest(node, PING_ACTION_NAME, pingRequest, options, new BaseTransportResponseHandler<PingResponse>() {
  @Override

@@ -255,8 +255,8 @@ public class NodesFaultDetection extends FaultDetection {
  public void messageReceived(PingRequest request, TransportChannel channel) throws Exception {
  // if we are not the node we are supposed to be pinged, send an exception
  // this can happen when a kill -9 is sent, and another node is started using the same port
- if (!localNode.id().equals(request.nodeId)) {
- throw new IllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.id() + "]");
+ if (!localNode.getId().equals(request.nodeId)) {
+ throw new IllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.getId() + "]");
  }

  // PingRequest will have clusterName set to null if it came from a node of version <1.4.0

@@ -378,9 +378,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
  // to make sure we don't disconnect a true node which was temporarily removed from the DiscoveryNodes
  // but will be added again during the pinging. We therefore create a new temporary node
  if (!nodeFoundByAddress) {
- if (!nodeToSend.id().startsWith(UNICAST_NODE_PREFIX)) {
+ if (!nodeToSend.getId().startsWith(UNICAST_NODE_PREFIX)) {
  DiscoveryNode tempNode = new DiscoveryNode("",
- UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "_" + nodeToSend.id() + "#",
+ UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "_" + nodeToSend.getId() + "#",
  nodeToSend.getHostName(), nodeToSend.getHostAddress(), nodeToSend.getAddress(), nodeToSend.getAttributes(),
  nodeToSend.getRoles(), nodeToSend.getVersion());

@@ -469,7 +469,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
  try {
  DiscoveryNodes discoveryNodes = contextProvider.nodes();
  for (PingResponse pingResponse : response.pingResponses) {
- if (pingResponse.node().id().equals(discoveryNodes.localNodeId())) {
+ if (pingResponse.node().getId().equals(discoveryNodes.localNodeId())) {
  // that's us, ignore
  continue;
  }

@@ -179,7 +179,7 @@ public class PublishClusterStateAction extends AbstractComponent {
  // try and serialize the cluster state once (or per version), so we don't serialize it
  // per node when we send it over the wire, compress it while we are at it...
  // we don't send full version if node didn't exist in the previous version of cluster state
- if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) {
+ if (sendFullVersion || !previousState.nodes().nodeExists(node.getId())) {
  sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController);
  } else {
  sendClusterStateDiff(clusterState, serializedDiffs, serializedStates, node, publishTimeout, sendingController);

@@ -210,7 +210,7 @@ public class PublishClusterStateAction extends AbstractComponent {
  Diff<ClusterState> diff = null;
  for (final DiscoveryNode node : nodesToPublishTo) {
  try {
- if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) {
+ if (sendFullVersion || !previousState.nodes().nodeExists(node.getId())) {
  // will send a full reference
  if (serializedStates.containsKey(node.version()) == false) {
  serializedStates.put(node.version(), serializeFullClusterState(clusterState, node.version()));

@@ -158,12 +158,12 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
  NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0);
  logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
  changed = true;
- unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
+ unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
  } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
  NodeGatewayStartedShards nodeShardState = nodesToAllocate.noNodeShards.get(0);
  logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
  changed = true;
- unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
+ unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
  } else {
  // we are throttling this, but we have enough to allocate to this node, ignore it for now
  logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards);

@@ -187,7 +187,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
  DiscoveryNode node = nodeShardState.getNode();
  String allocationId = nodeShardState.allocationId();

- if (ignoreNodes.contains(node.id())) {
+ if (ignoreNodes.contains(node.getId())) {
  continue;
  }

@@ -272,7 +272,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
  List<NodeGatewayStartedShards> throttledNodeShards = new ArrayList<>();
  List<NodeGatewayStartedShards> noNodeShards = new ArrayList<>();
  for (NodeGatewayStartedShards nodeShardState : nodeShardStates) {
- RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().id());
+ RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId());
  if (node == null) {
  continue;
  }

@@ -302,7 +302,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
  long version = nodeShardState.legacyVersion();
  DiscoveryNode node = nodeShardState.getNode();

- if (ignoreNodes.contains(node.id())) {
+ if (ignoreNodes.contains(node.getId())) {
  continue;
  }

@@ -164,7 +164,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
  MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);

  if (matchingNodes.getNodeWithHighestMatch() != null) {
- RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().id());
+ RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
  // we only check on THROTTLE since we checked before before on NO
  Decision decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
  if (decision.type() == Decision.Type.THROTTLE) {

@@ -217,7 +217,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
  */
  private boolean canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
  for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().dataNodes().values()) {
- RoutingNode node = allocation.routingNodes().node(cursor.value.id());
+ RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
  if (node == null) {
  continue;
  }

@@ -259,7 +259,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
  continue;
  }

- RoutingNode node = allocation.routingNodes().node(discoNode.id());
+ RoutingNode node = allocation.routingNodes().node(discoNode.getId());
  if (node == null) {
  continue;
  }

@@ -380,7 +380,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL

@Override
public void handleResponse(PreSyncedFlushResponse response) {
-Engine.CommitId existing = commitIds.putIfAbsent(node.id(), response.commitId());
+Engine.CommitId existing = commitIds.putIfAbsent(node.getId(), response.commitId());
assert existing == null : "got two answers for node [" + node + "]";
// count after the assert so we won't decrement twice in handleException
if (countDown.countDown()) {

@@ -88,7 +88,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe

// starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
// the index operations will not be routed to it properly
-RoutingNode node = clusterService.state().getRoutingNodes().node(request.targetNode().id());
+RoutingNode node = clusterService.state().getRoutingNodes().node(request.targetNode().getId());
if (node == null) {
logger.debug("delaying recovery of {} as source node {} is unknown", request.shardId(), request.targetNode());
throw new DelayRecoveryException("source node does not have the node [" + request.targetNode() + "] in its state yet..");

@@ -316,7 +316,7 @@ public class RecoveryState implements ToXContent, Streamable {
restoreSource.toXContent(builder, params);
} else {
builder.startObject(Fields.SOURCE);
-builder.field(Fields.ID, sourceNode.id());
+builder.field(Fields.ID, sourceNode.getId());
builder.field(Fields.HOST, sourceNode.getHostName());
builder.field(Fields.TRANSPORT_ADDRESS, sourceNode.address().toString());
builder.field(Fields.IP, sourceNode.getHostAddress());

@@ -325,7 +325,7 @@ public class RecoveryState implements ToXContent, Streamable {
}

builder.startObject(Fields.TARGET);
-builder.field(Fields.ID, targetNode.id());
+builder.field(Fields.ID, targetNode.getId());
builder.field(Fields.HOST, targetNode.getHostName());
builder.field(Fields.TRANSPORT_ADDRESS, targetNode.address().toString());
builder.field(Fields.IP, targetNode.getHostAddress());

@@ -160,7 +160,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
}

// check if shard is active on the current node or is getting relocated to the our node
-String localNodeId = state.getNodes().localNode().id();
+String localNodeId = state.getNodes().localNode().getId();
if (localNodeId.equals(shardRouting.currentNodeId()) || localNodeId.equals(shardRouting.relocatingNodeId())) {
return false;
}
@@ -85,7 +85,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
doVerify(repository, verificationToken);
} catch (Throwable t) {
logger.warn("[{}] failed to verify repository", t, repository);
-errors.add(new VerificationFailure(node.id(), t));
+errors.add(new VerificationFailure(node.getId(), t));
}
if (counter.decrementAndGet() == 0) {
finishVerification(listener, nodes, errors);

@@ -101,7 +101,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {

@Override
public void handleException(TransportException exp) {
-errors.add(new VerificationFailure(node.id(), exp));
+errors.add(new VerificationFailure(node.getId(), exp));
if (counter.decrementAndGet() == 0) {
finishVerification(listener, nodes, errors);
}

@@ -120,7 +120,7 @@ public class RestAllocationAction extends AbstractCatAction {
for (NodeStats nodeStats : stats.getNodes()) {
DiscoveryNode node = nodeStats.getNode();

-int shardCount = allocs.getOrDefault(node.id(), 0);
+int shardCount = allocs.getOrDefault(node.getId(), 0);

ByteSizeValue total = nodeStats.getFs().getTotal().getTotal();
ByteSizeValue avail = nodeStats.getFs().getTotal().getAvailable();

@@ -124,7 +124,7 @@ public class RestFielddataAction extends AbstractCatAction {
table.startRow();
// add the node info and field data total before each individual field
NodeStats ns = statsEntry.getKey();
-table.addCell(ns.getNode().id());
+table.addCell(ns.getNode().getId());
table.addCell(ns.getNode().getHostName());
table.addCell(ns.getNode().getHostAddress());
table.addCell(ns.getNode().getName());
@@ -102,11 +102,11 @@ public class RestNodeAttrsAction extends AbstractCatAction {
Table table = getTableWithHeader(req);

for (DiscoveryNode node : nodes) {
-NodeInfo info = nodesInfo.getNodesMap().get(node.id());
+NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
table.startRow();
table.addCell(node.name());
-table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
+table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().getId());
table.addCell(node.getHostName());
table.addCell(node.getHostAddress());

@@ -227,8 +227,8 @@ public class RestNodesAction extends AbstractCatAction {
Table table = getTableWithHeader(req);

for (DiscoveryNode node : nodes) {
-NodeInfo info = nodesInfo.getNodesMap().get(node.id());
-NodeStats stats = nodesStats.getNodesMap().get(node.id());
+NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
+NodeStats stats = nodesStats.getNodesMap().get(node.getId());

JvmInfo jvmInfo = info == null ? null : info.getJvm();
JvmStats jvmStats = stats == null ? null : stats.getJvm();

@@ -239,7 +239,7 @@ public class RestNodesAction extends AbstractCatAction {

table.startRow();

-table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
+table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().getId());
table.addCell(node.getHostAddress());
if (node.address() instanceof InetSocketTransportAddress) {

@@ -287,7 +287,7 @@ public class RestNodesAction extends AbstractCatAction {
roles = node.getRoles().stream().map(DiscoveryNode.Role::getAbbreviation).collect(Collectors.joining());
}
table.addCell(roles);
-table.addCell(masterId == null ? "x" : masterId.equals(node.id()) ? "*" : "-");
+table.addCell(masterId == null ? "x" : masterId.equals(node.getId()) ? "*" : "-");
table.addCell(node.name());

CompletionStats completionStats = indicesStats == null ? null : stats.getIndices().getCompletion();

@@ -94,11 +94,11 @@ public class RestPluginsAction extends AbstractCatAction {
Table table = getTableWithHeader(req);

for (DiscoveryNode node : nodes) {
-NodeInfo info = nodesInfo.getNodesMap().get(node.id());
+NodeInfo info = nodesInfo.getNodesMap().get(node.getId());

for (PluginInfo pluginInfo : info.getPlugins().getPluginInfos()) {
table.startRow();
-table.addCell(node.id());
+table.addCell(node.getId());
table.addCell(node.name());
table.addCell(pluginInfo.getName());
table.addCell(pluginInfo.getVersion());

@@ -226,11 +226,11 @@ public class RestThreadPoolAction extends AbstractCatAction {
Table table = getTableWithHeader(req);

for (DiscoveryNode node : nodes) {
-NodeInfo info = nodesInfo.getNodesMap().get(node.id());
-NodeStats stats = nodesStats.getNodesMap().get(node.id());
+NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
+NodeStats stats = nodesStats.getNodesMap().get(node.getId());
table.startRow();

-table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
+table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().getId());
table.addCell(node.getHostName());
table.addCell(node.getHostAddress());
@@ -542,7 +542,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().getId());
-SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId());
+SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());

Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;

@@ -207,7 +207,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
// snapshots in the future
Map<SnapshotId, Map<ShardId, IndexShardSnapshotStatus>> newSnapshots = new HashMap<>();
// Now go through all snapshots and update existing or create missing
-final String localNodeId = clusterService.localNode().id();
+final String localNodeId = clusterService.localNode().getId();
if (snapshotsInProgress != null) {
for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
if (entry.state() == SnapshotsInProgress.State.STARTED) {

@@ -951,7 +951,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
globalLock.readLock().lock();
try {

-try (Releasable ignored = connectionLock.acquire(node.id())) {
+try (Releasable ignored = connectionLock.acquire(node.getId())) {
if (!lifecycle.started()) {
throw new IllegalStateException("can't add nodes to a stopped transport");
}

@@ -1109,7 +1109,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
@Override
public void disconnectFromNode(DiscoveryNode node) {

-try (Releasable ignored = connectionLock.acquire(node.id())) {
+try (Releasable ignored = connectionLock.acquire(node.getId())) {
NodeChannels nodeChannels = connectedNodes.remove(node);
if (nodeChannels != null) {
try {

@@ -1131,7 +1131,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
// check outside of the lock
NodeChannels nodeChannels = connectedNodes.get(node);
if (nodeChannels != null && nodeChannels.hasChannel(channel)) {
-try (Releasable ignored = connectionLock.acquire(node.id())) {
+try (Releasable ignored = connectionLock.acquire(node.getId())) {
nodeChannels = connectedNodes.get(node);
// check again within the connection lock, if its still applicable to remove it
if (nodeChannels != null && nodeChannels.hasChannel(channel)) {
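The three NettyTransport hunks above keep the existing pattern of serialising connect/disconnect work per node: a connection lock keyed by the node id is acquired in try-with-resources, and only the key expression changes from node.id() to node.getId(). A rough sketch of that per-key locking shape follows; the lock implementation below is an assumption for illustration, not Elasticsearch's KeyedLock/Releasable classes.

    // Rough sketch only — not Elasticsearch's KeyedLock/Releasable. It mirrors the
    // try-with-resources shape used above, where the lock is keyed by node.getId().
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.locks.ReentrantLock;

    final class PerKeyLock {
        // One lock per key; entries are never evicted in this simplified sketch.
        private final ConcurrentMap<String, ReentrantLock> locks = new ConcurrentHashMap<>();

        AutoCloseable acquire(String key) {
            ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
            lock.lock();
            return lock::unlock; // released when the try-with-resources block exits
        }

        public static void main(String[] args) throws Exception {
            PerKeyLock connectionLock = new PerKeyLock();
            String nodeId = "node_0"; // stands in for node.getId()
            try (AutoCloseable ignored = connectionLock.acquire(nodeId)) {
                // connect or disconnect work for this node would run here
            }
        }
    }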
@@ -353,20 +353,20 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
for (DiscoveryNode discoNode : currentState.nodes()) {
String markedTribeName = discoNode.getAttributes().get(TRIBE_NAME_SETTING.getKey());
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
-if (tribeState.nodes().get(discoNode.id()) == null) {
+if (tribeState.nodes().get(discoNode.getId()) == null) {
clusterStateChanged = true;
logger.info("[{}] removing node [{}]", tribeName, discoNode);
-nodes.remove(discoNode.id());
+nodes.remove(discoNode.getId());
}
}
}
// go over tribe nodes, and see if they need to be added
for (DiscoveryNode tribe : tribeState.nodes()) {
-if (currentState.nodes().get(tribe.id()) == null) {
+if (currentState.nodes().get(tribe.getId()) == null) {
// a new node, add it, but also add the tribe name to the attributes
Map<String, String> tribeAttr = new HashMap<>(tribe.getAttributes());
tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName);
-DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(),
+DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.getId(), tribe.getHostName(), tribe.getHostAddress(),
tribe.address(), unmodifiableMap(tribeAttr), tribe.getRoles(), tribe.version());
clusterStateChanged = true;
logger.info("[{}] adding node [{}]", tribeName, discoNode);
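Every production call site in the hunks above is the same one-line substitution: the removed DiscoveryNode#id() accessor is replaced by the existing DiscoveryNode#getId(). A minimal sketch of the accessor shape those call sites now depend on follows; it is illustrative only, not the actual DiscoveryNode source, and the class and field names are assumptions.

    // Illustrative sketch, not part of this commit: the single getter-style id
    // accessor callers use once the bare id() alias is gone.
    public final class NodeHandle {
        private final String nodeId; // assumed field name

        public NodeHandle(String nodeId) {
            this.nodeId = nodeId;
        }

        // The one and only id accessor; callers use node.getId() everywhere,
        // e.g. as a map key or when looking up a RoutingNode for this node.
        public String getId() {
            return nodeId;
        }
    }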
@@ -111,7 +111,7 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
assertThat(eitherLegacyVersionOrAllocationIdSet, equalTo(true));
assertThat(storeInfo.containsKey("allocation"), equalTo(true));
assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocationStatus().value()));
-assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true));
+assertThat(storeInfo.containsKey(storeStatus.getNode().getId()), equalTo(true));
if (storeStatus.getStoreException() != null) {
assertThat(storeInfo.containsKey("store_exception"), equalTo(true));
}

@@ -224,14 +224,14 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
totalIndexShards += numberOfShards;
for (int j = 0; j < numberOfShards; j++) {
final ShardId shardId = new ShardId(index, "_na_", ++shardIndex);
-ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED);
+ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.getId(), true, ShardRoutingState.STARTED);
IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(shardId);
indexShard.addShard(shard);
indexRoutingTable.addIndexShard(indexShard.build());
}
}
-discoBuilder.localNodeId(newNode(0).id());
-discoBuilder.masterNodeId(newNode(numberOfNodes - 1).id());
+discoBuilder.localNodeId(newNode(0).getId());
+discoBuilder.masterNodeId(newNode(numberOfNodes - 1).getId());
ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName(TEST_CLUSTER));
stateBuilder.nodes(discoBuilder);
final IndexMetaData.Builder indexMetaData = IndexMetaData.builder(index)

@@ -320,7 +320,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {

DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
-builder.remove(masterNode.id());
+builder.remove(masterNode.getId());

setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));

@@ -332,7 +332,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[]{TEST_INDEX});
Set<String> set = new HashSet<>();
for (ShardRouting shard : shardIt.asUnordered()) {
-if (!shard.currentNodeId().equals(masterNode.id())) {
+if (!shard.currentNodeId().equals(masterNode.getId())) {
set.add(shard.currentNodeId());
}
}

@@ -405,7 +405,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
if (simulateFailedMasterNode) {
failedMasterNode = clusterService.state().nodes().masterNode();
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
-builder.remove(failedMasterNode.id());
+builder.remove(failedMasterNode.getId());
builder.masterNodeId(null);

setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));

@@ -453,7 +453,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
}
}
if (simulateFailedMasterNode) {
-totalShards += map.get(failedMasterNode.id()).size();
+totalShards += map.get(failedMasterNode.getId()).size();
}

Response response = listener.get();

@@ -135,8 +135,8 @@ public class TransportNodesActionTests extends ESTestCase {
discoBuilder = discoBuilder.put(node);
discoveryNodes.add(node);
}
-discoBuilder.localNodeId(randomFrom(discoveryNodes).id());
-discoBuilder.masterNodeId(randomFrom(discoveryNodes).id());
+discoBuilder.localNodeId(randomFrom(discoveryNodes).getId());
+discoBuilder.masterNodeId(randomFrom(discoveryNodes).getId());
ClusterState.Builder stateBuilder = ClusterState.builder(CLUSTER_NAME);
stateBuilder.nodes(discoBuilder);
ClusterState clusterState = stateBuilder.build();
@@ -82,10 +82,10 @@ public class ClusterStateCreationUtils {
for (int i = 0; i < numberOfNodes + 1; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
-unassignedNodes.add(node.id());
+unassignedNodes.add(node.getId());
}
-discoBuilder.localNodeId(newNode(0).id());
-discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
+discoBuilder.localNodeId(newNode(0).getId());
+discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
final int primaryTerm = randomInt(200);
IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)

@@ -101,11 +101,11 @@ public class ClusterStateCreationUtils {
UnassignedInfo unassignedInfo = null;
if (primaryState != ShardRoutingState.UNASSIGNED) {
if (activePrimaryLocal) {
-primaryNode = newNode(0).id();
+primaryNode = newNode(0).getId();
unassignedNodes.remove(primaryNode);
} else {
Set<String> unassignedNodesExecludingPrimary = new HashSet<>(unassignedNodes);
-unassignedNodesExecludingPrimary.remove(newNode(0).id());
+unassignedNodesExecludingPrimary.remove(newNode(0).getId());
primaryNode = selectAndRemove(unassignedNodesExecludingPrimary);
}
if (primaryState == ShardRoutingState.RELOCATING) {

@@ -154,8 +154,8 @@ public class ClusterStateCreationUtils {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
}
-discoBuilder.localNodeId(newNode(0).id());
-discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
+discoBuilder.localNodeId(newNode(0).getId());
+discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1)

@@ -169,9 +169,9 @@ public class ClusterStateCreationUtils {
routing.addAsNew(indexMetaData);
final ShardId shardId = new ShardId(index, "_na_", i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
-indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true,
+indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, null, true,
ShardRoutingState.STARTED, null));
-indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false,
+indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).getId(), null, null, false,
ShardRoutingState.STARTED, null));
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
}

@@ -221,8 +221,8 @@ public class ClusterStateCreationUtils {
public static ClusterState stateWithNoShard() {
int numberOfNodes = 2;
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
-discoBuilder.localNodeId(newNode(0).id());
-discoBuilder.masterNodeId(newNode(1).id());
+discoBuilder.localNodeId(newNode(0).getId());
+discoBuilder.masterNodeId(newNode(1).getId());
ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
state.nodes(discoBuilder);
state.metaData(MetaData.builder().generateClusterUuidIfNeeded());

@@ -244,9 +244,9 @@ public class ClusterStateCreationUtils {
discoBuilder.put(node);
}
if (masterNode != null) {
-discoBuilder.masterNodeId(masterNode.id());
+discoBuilder.masterNodeId(masterNode.getId());
}
-discoBuilder.localNodeId(localNode.id());
+discoBuilder.localNodeId(localNode.getId());

ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
state.nodes(discoBuilder);
@@ -768,7 +768,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertEquals(1, shardFailedRequests.length);
CapturingTransport.CapturedRequest shardFailedRequest = shardFailedRequests[0];
// get the shard the request was sent to
-ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.id()).get(request.shardId.id());
+ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.getId()).get(request.shardId.id());
// and the shard that was requested to be failed
ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry) shardFailedRequest.request;
// the shard the request was sent to and the shard to be failed should be the same

@@ -72,7 +72,7 @@ public class TransportClientIT extends ESIntegTestCase {
}

for (DiscoveryNode discoveryNode : nodeService.listedNodes()) {
-assertThat(discoveryNode.id(), startsWith("#transport#-"));
+assertThat(discoveryNode.getId(), startsWith("#transport#-"));
assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion()));
}

@@ -74,7 +74,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
emptyMap(), emptySet(), Version.CURRENT);
DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"),
emptyMap(), emptySet(), Version.CURRENT);
-DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.id()).build();
+DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.getId()).build();
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode);

@@ -36,9 +36,9 @@ public class ClusterStateTests extends ESTestCase {
final DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build();
ClusterState noMaster1 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(nodes).build();
ClusterState noMaster2 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(nodes).build();
-ClusterState withMaster1a = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.id())).build();
-ClusterState withMaster1b = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.id())).build();
-ClusterState withMaster2 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.id())).build();
+ClusterState withMaster1a = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())).build();
+ClusterState withMaster1b = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())).build();
+ClusterState withMaster2 = ClusterState.builder(ClusterName.DEFAULT).version(randomInt(5)).nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.getId())).build();

// states with no master should never supersede anything
assertFalse(noMaster1.supersedes(noMaster2));

@@ -84,7 +84,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
String excludedNodeId = null;
for (NodeInfo nodeInfo : nodesInfo) {
if (nodeInfo.getNode().isDataNode()) {
-excludedNodeId = nodeInfo.getNode().id();
+excludedNodeId = nodeInfo.getNode().getId();
break;
}
}

@@ -102,7 +102,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
assert clusterState.nodes() != null;
-if (shardRouting.unassigned() == false && clusterState.nodes().get(shardRouting.currentNodeId()).id().equals(excludedNodeId)) {
+if (shardRouting.unassigned() == false && clusterState.nodes().get(shardRouting.currentNodeId()).getId().equals(excludedNodeId)) {
//if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged
//reroute happens as part of the update settings and we made sure no throttling comes into the picture via settings
assertThat(shardRouting.relocating(), equalTo(true));

@@ -127,7 +127,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
String excludedNodeId = null;
for (NodeInfo nodeInfo : nodesInfo) {
if (nodeInfo.getNode().isDataNode()) {
-excludedNodeId = nodeInfo.getNode().id();
+excludedNodeId = nodeInfo.getNode().getId();
break;
}
}

@@ -375,7 +375,7 @@ public class ShardStateActionTests extends ESTestCase {
private void setUpMasterRetryVerification(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) {
shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {
DiscoveryNodes.Builder masterBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
-masterBuilder.masterNodeId(clusterService.state().nodes().masterNodes().iterator().next().value.id());
+masterBuilder.masterNodeId(clusterService.state().nodes().masterNodes().iterator().next().value.getId());
setState(clusterService, ClusterState.builder(clusterService.state()).nodes(masterBuilder));
});
@@ -22,10 +22,8 @@ package org.elasticsearch.cluster.allocation;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;

@@ -107,7 +105,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.setDryRun(true)
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));

logger.info("--> get the state, verify nothing changed because of the dry run");
state = client().admin().cluster().prepareState().execute().actionGet().getState();

@@ -119,7 +117,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));

ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));

@@ -127,7 +125,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary allocated");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));

logger.info("--> move shard 1 primary from node1 to node2");
state = client().admin().cluster().prepareReroute()

@@ -135,8 +133,8 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new MoveAllocationCommand("test", 0, node_1, node_2))
.execute().actionGet().getState();

-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));

healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();

@@ -145,7 +143,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));
}

public void testRerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception {

@@ -218,7 +216,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));

healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));

@@ -226,7 +224,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary allocated");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));

client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
final Index index = resolveIndex("test");

@@ -253,7 +251,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));

healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));

@@ -261,7 +259,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> get the state, verify shard 1 primary allocated");
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).get(0).state(), equalTo(ShardRoutingState.STARTED));

}
@@ -41,8 +41,8 @@ public class DiscoveryNodesTests extends ESTestCase {
DiscoveryNodes discoveryNodes = buildDiscoveryNodes();
DiscoveryNode[] nodes = discoveryNodes.nodes().values().toArray(DiscoveryNode.class);
DiscoveryNode node = randomFrom(nodes);
-DiscoveryNode resolvedNode = discoveryNodes.resolveNode(randomBoolean() ? node.id() : node.name());
-assertThat(resolvedNode.id(), equalTo(node.id()));
+DiscoveryNode resolvedNode = discoveryNodes.resolveNode(randomBoolean() ? node.getId() : node.name());
+assertThat(resolvedNode.getId(), equalTo(node.getId()));
}

public void testResolveNodeByAttribute() {

@@ -52,7 +52,7 @@ public class DiscoveryNodesTests extends ESTestCase {
try {
DiscoveryNode resolvedNode = discoveryNodes.resolveNode(nodeSelector.selector);
assertThat(matchingNodeIds.size(), equalTo(1));
-assertThat(resolvedNode.id(), equalTo(matchingNodeIds.iterator().next()));
+assertThat(resolvedNode.getId(), equalTo(matchingNodeIds.iterator().next()));
} catch(IllegalArgumentException e) {
if (matchingNodeIds.size() == 0) {
assertThat(e.getMessage(), equalTo("failed to resolve [" + nodeSelector.selector + "], no matching nodes"));

@@ -88,7 +88,7 @@ public class DiscoveryNodesTests extends ESTestCase {
for (int i = 0; i < numNodeNames; i++) {
DiscoveryNode discoveryNode = randomFrom(nodes);
nodeSelectors.add(discoveryNode.name());
-expectedNodeIdsSet.add(discoveryNode.id());
+expectedNodeIdsSet.add(discoveryNode.getId());
}

String[] resolvedNodesIds = discoveryNodes.resolveNodesIds(nodeSelectors.toArray(new String[nodeSelectors.size()]));

@@ -111,8 +111,8 @@ public class DiscoveryNodesTests extends ESTestCase {
discoBuilder = discoBuilder.put(node);
nodesList.add(node);
}
-discoBuilder.localNodeId(randomFrom(nodesList).id());
-discoBuilder.masterNodeId(randomFrom(nodesList).id());
+discoBuilder.localNodeId(randomFrom(nodesList).getId());
+discoBuilder.masterNodeId(randomFrom(nodesList).getId());
return discoBuilder.build();
}

@@ -158,7 +158,7 @@ public class DiscoveryNodesTests extends ESTestCase {
Set<String> ids = new HashSet<>();
nodes.getNodes().valuesIt().forEachRemaining(node -> {
if ("value".equals(node.getAttributes().get("attr"))) {
-ids.add(node.id());
+ids.add(node.getId());
}
});
return ids;
@@ -389,7 +389,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
 ArrayList<DiscoveryNode> discoveryNodes = CollectionUtils.iterableAsArrayList(clusterState.nodes());
 Collections.shuffle(discoveryNodes, random());
 for (DiscoveryNode node : discoveryNodes) {
-nodes.remove(node.id());
+nodes.remove(node.getId());
 numNodes--;
 if (numNodes <= 0) {
 break;
@@ -319,14 +319,14 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
 RoutingTable routingTable = RoutingTable.builder()
 .add(IndexRoutingTable.builder(shard1.getIndex())
 .addIndexShard(new IndexShardRoutingTable.Builder(shard1)
-.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.id(), true, ShardRoutingState.STARTED))
-.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.id(), false, ShardRoutingState.STARTED))
+.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.getId(), true, ShardRoutingState.STARTED))
+.addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.getId(), false, ShardRoutingState.STARTED))
 .build())
 )
 .add(IndexRoutingTable.builder(shard2.getIndex())
 .addIndexShard(new IndexShardRoutingTable.Builder(shard2)
-.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.id(), true, ShardRoutingState.STARTED))
-.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.id(), false, ShardRoutingState.STARTED))
+.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.getId(), true, ShardRoutingState.STARTED))
+.addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.getId(), false, ShardRoutingState.STARTED))
 .build())
 )
 .build();
@@ -122,8 +122,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
 DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
 builder.put(nodeA);
 builder.put(nodeB);
-builder.localNodeId(nodeA.id());
-builder.masterNodeId(master ? nodeA.id() : nodeB.id());
+builder.localNodeId(nodeA.getId());
+builder.masterNodeId(master ? nodeA.getId() : nodeB.getId());
 return builder.build();
 }

@@ -131,8 +131,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
 DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
 builder.put(nodeA);
 builder.put(nodeB);
-builder.localNodeId(nodeB.id());
-builder.masterNodeId(master ? nodeB.id() : nodeA.id());
+builder.localNodeId(nodeB.getId());
+builder.masterNodeId(master ? nodeB.getId() : nodeA.getId());
 return builder.build();
 }
@@ -68,7 +68,7 @@ public class ElectMasterServiceTests extends ESTestCase {
 if (!prevNode.masterNode()) {
 assertFalse(node.masterNode());
 } else if (node.masterNode()) {
-assertTrue(prevNode.id().compareTo(node.id()) < 0);
+assertTrue(prevNode.getId().compareTo(node.getId()) < 0);
 }
 prevNode = node;
 }
@@ -100,7 +100,7 @@ public class ElectMasterServiceTests extends ESTestCase {
 assertNotNull(master);
 for (DiscoveryNode node : nodes) {
 if (node.masterNode()) {
-assertTrue(master.id().compareTo(node.id()) <= 0);
+assertTrue(master.getId().compareTo(node.getId()) <= 0);
 }
 }
 }
@@ -97,7 +97,7 @@ public class NodeJoinControllerTests extends ESTestCase {
 final DiscoveryNode localNode = initialNodes.localNode();
 // make sure we have a master
 setState(clusterService, ClusterState.builder(clusterService.state()).nodes(
-DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id())));
+DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId())));
 nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY),
 new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
 Settings.EMPTY);
@@ -555,7 +555,7 @@ public class NodeJoinControllerTests extends ESTestCase {
 logger.info("assert for [{}] in:\n{}", expectedNodes, state.prettyPrint());
 DiscoveryNodes discoveryNodes = state.nodes();
 for (DiscoveryNode node : expectedNodes) {
-assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.id()), equalTo(node));
+assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.getId()), equalTo(node));
 }
 assertThat(discoveryNodes.size(), equalTo(expectedNodes.size()));
 }
@@ -72,7 +72,7 @@ public class ZenPingTests extends ESTestCase {
 ZenPing.PingResponse[] aggregate = collection.toArray();

 for (ZenPing.PingResponse ping : aggregate) {
-int nodeId = Integer.parseInt(ping.node().id());
+int nodeId = Integer.parseInt(ping.node().getId());
 assertThat(maxIdPerNode[nodeId], equalTo(ping.id()));
 assertThat(masterPerNode[nodeId], equalTo(ping.master()));
 assertThat(hasJoinedOncePerNode[nodeId], equalTo(ping.hasJoinedOnce()));
@@ -110,14 +110,14 @@ public class UnicastZenPingIT extends ESTestCase {
 logger.info("ping from UZP_A");
 ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(10));
 assertThat(pingResponses.length, equalTo(1));
-assertThat(pingResponses[0].node().id(), equalTo("UZP_B"));
+assertThat(pingResponses[0].node().getId(), equalTo("UZP_B"));
 assertTrue(pingResponses[0].hasJoinedOnce());

 // ping again, this time from B,
 logger.info("ping from UZP_B");
 pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(10));
 assertThat(pingResponses.length, equalTo(1));
-assertThat(pingResponses[0].node().id(), equalTo("UZP_A"));
+assertThat(pingResponses[0].node().getId(), equalTo("UZP_A"));
 assertFalse(pingResponses[0].hasJoinedOnce());

 } finally {
@@ -97,11 +97,11 @@ public class PublishClusterStateActionTests extends ESTestCase {
 this.service = service;
 this.listener = listener;
 this.logger = logger;
-this.clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.id()).build()).build();
+this.clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.getId()).build()).build();
 }

 public MockNode setAsMaster() {
-this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.id())).build();
+this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.getId())).build();
 return this;
 }

@@ -306,8 +306,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
 .put(nodeA.discoveryNode)
 .put(nodeB.discoveryNode)
 .put(nodeC.discoveryNode)
-.masterNodeId(nodeB.discoveryNode.id())
-.localNodeId(nodeB.discoveryNode.id())
+.masterNodeId(nodeB.discoveryNode.getId())
+.localNodeId(nodeB.discoveryNode.getId())
 .build();
 previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
 clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
@@ -358,7 +358,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 });

 // Initial cluster state
-DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).masterNodeId(nodeA.discoveryNode.id()).build();
+DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build();
 ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodes).build();

 // cluster state update - add nodeB
@@ -485,7 +485,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 for (int i = 0; i < dataNodes; i++) {
 discoveryNodesBuilder.put(createMockNode("data_" + i, dataSettings).discoveryNode);
 }
-discoveryNodesBuilder.localNodeId(master.discoveryNode.id()).masterNodeId(master.discoveryNode.id());
+discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
 DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
 MetaData metaData = MetaData.EMPTY_META_DATA;
 ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).nodes(discoveryNodes).build();
@@ -562,7 +562,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 logger.info("--> expecting commit to {}. good nodes [{}], errors [{}], timeouts [{}]. min_master_nodes [{}]",
 expectedBehavior, goodNodes + 1, errorNodes, timeOutNodes, minMasterNodes);

-discoveryNodesBuilder.localNodeId(master.discoveryNode.id()).masterNodeId(master.discoveryNode.id());
+discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
 DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
 MetaData metaData = MetaData.EMPTY_META_DATA;
 ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).nodes(discoveryNodes).build();
@@ -623,7 +623,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 try {
 MockNode otherNode = createMockNode("otherNode");
 state = ClusterState.builder(node.clusterState).nodes(
-DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.id()).build()
+DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build()
 ).incrementVersion().build();
 node.action.validateIncomingState(state, node.clusterState);
 fail("node accepted state with existent but wrong local node");
@@ -696,7 +696,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 MockNode master = createMockNode("master", settings);
 MockNode node = createMockNode("node", settings);
 ClusterState state = ClusterState.builder(master.clusterState)
-.nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.id())).build();
+.nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build();

 for (int i = 0; i < 10; i++) {
 state = ClusterState.builder(state).incrementVersion().build();
@@ -135,7 +135,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(changed, equalTo(true));
 assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));
 }

 /**
@@ -173,7 +173,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(changed, equalTo(true));
 assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));
 if (useAllocationIds) {
 // check that allocation id is reused
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("allocId1"));
@@ -195,7 +195,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
 DiscoveryNode allocatedNode = node1HasPrimaryShard ? node1 : node2;
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(allocatedNode.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(allocatedNode.getId()));
 }

 /**
@@ -234,7 +234,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(changed, equalTo(true));
 assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));
 }

 /**
@@ -247,7 +247,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(changed, equalTo(true));
 assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
 }

 /**
@@ -265,7 +265,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(changed, equalTo(true));
 assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("some allocId"));
 }

@@ -466,7 +466,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), anyOf(equalTo(node2.id()), equalTo(node1.id())));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), anyOf(equalTo(node2.getId()), equalTo(node1.getId())));
 }

 /**
@@ -507,7 +507,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
 }

 private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, boolean asNew, Version version, String... activeAllocationIds) {
@@ -122,7 +122,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 .addData(nodeToMatch, false, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
 testAllocator.allocateUnassigned(allocation);
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
 }

 /**
@@ -135,7 +135,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 .addData(nodeToMatch, false, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM"));
 testAllocator.allocateUnassigned(allocation);
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
 }

 /**
@@ -148,7 +148,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 .addData(nodeToMatch, false, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
 testAllocator.allocateUnassigned(allocation);
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
 }

 /**
@@ -246,7 +246,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 changed = testAllocator.allocateUnassigned(allocation);
 assertThat(changed, equalTo(true));
 assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
+assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
 }

 public void testCancelRecoveryBetterSyncId() {
@@ -284,7 +284,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 }

 private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) {
-ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED);
+ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.getId(), true, ShardRoutingState.STARTED);
 MetaData metaData = MetaData.builder()
 .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT).put(settings))
 .numberOfShards(1).numberOfReplicas(1)
@@ -306,7 +306,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 }

 private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {
-ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED);
+ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.getId(), true, ShardRoutingState.STARTED);
 MetaData metaData = MetaData.builder()
 .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT))
 .numberOfShards(1).numberOfReplicas(1)
@@ -316,7 +316,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 .add(IndexRoutingTable.builder(shardId.getIndex())
 .addIndexShard(new IndexShardRoutingTable.Builder(shardId)
 .addShard(primaryShard)
-.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.id(), null, null, false, ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
+.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.getId(), null, null, false, ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
 .build())
 )
 .build();
@@ -561,7 +561,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
 ensureSearchable(indexName);

 ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
-final String blueNodeId = internalCluster().getInstance(ClusterService.class, blueNodeName).localNode().id();
+final String blueNodeId = internalCluster().getInstance(ClusterService.class, blueNodeName).localNode().getId();

 assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty());
@@ -381,8 +381,8 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
 final String masterNode = internalCluster().getMasterName();
 final String nonMasterNode = nodes.get(0).equals(masterNode) ? nodes.get(1) : nodes.get(0);

-final String masterId = internalCluster().clusterService(masterNode).localNode().id();
-final String nonMasterId = internalCluster().clusterService(nonMasterNode).localNode().id();
+final String masterId = internalCluster().clusterService(masterNode).localNode().getId();
+final String nonMasterId = internalCluster().clusterService(nonMasterNode).localNode().getId();

 final int numShards = scaledRandomIntBetween(2, 10);
 assertAcked(prepareCreate("test")
@@ -140,7 +140,7 @@ public class IndicesStoreTests extends ESTestCase {

 ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
 clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
-clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz",
+clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz",
 new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT)));
 IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1));
 int localShardId = randomInt(numShards - 1);
@@ -163,7 +163,7 @@ public class IndicesStoreTests extends ESTestCase {

 ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
 clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
-clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode));
+clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode));
 IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1));
 for (int i = 0; i < numShards; i++) {
 String relocatingNodeId = randomBoolean() ? null : "def";
@@ -185,7 +185,7 @@ public class IndicesStoreTests extends ESTestCase {
 final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random());
 ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
 clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
-clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz",
+clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz",
 new LocalTransportAddress("xyz"), emptyMap(), emptySet(), nodeVersion)));
 IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1));
 for (int i = 0; i < numShards; i++) {
@@ -207,7 +207,7 @@ public class IndicesStoreTests extends ESTestCase {
 clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
 final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random());

-clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id())
+clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId())
 .put(localNode)
 .put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT))
 .put(new DiscoveryNode("def", new LocalTransportAddress("def"), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test
@@ -93,10 +93,10 @@ public class CapturingTransport implements Transport {
 public Map<String, List<CapturedRequest>> capturedRequestsByTargetNode() {
 Map<String, List<CapturedRequest>> map = new HashMap<>();
 for (CapturedRequest request : capturedRequests) {
-List<CapturedRequest> nodeList = map.get(request.node.id());
+List<CapturedRequest> nodeList = map.get(request.node.getId());
 if (nodeList == null) {
 nodeList = new ArrayList<>();
-map.put(request.node.id(), nodeList);
+map.put(request.node.getId(), nodeList);
 }
 nodeList.add(request);
 }