Remove DiscoveryNode#name in favour of existing DiscoveryNode#getName
commit a8bbdff3bc
parent 9889f10e5e
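DiscoveryNode previously exposed the node name through both name() and getName(). This change removes the duplicate name() accessor, keeps getName() (which now returns the nodeName field directly), and updates every caller. As a rough illustration only (the surrounding variable is hypothetical, not taken from this diff), a call site migrates like this:

    // before this commit
    String masterName = clusterState.nodes().getMasterNode().name();
    // after this commit
    String masterName = clusterState.nodes().getMasterNode().getName();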
@@ -70,7 +70,7 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
         for (NodeInfo nodeInfo : this) {
             builder.startObject(nodeInfo.getNode().getId(), XContentBuilder.FieldCaseConversion.NONE);
 
-            builder.field("name", nodeInfo.getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
+            builder.field("name", nodeInfo.getNode().getName(), XContentBuilder.FieldCaseConversion.NONE);
             builder.field("transport_address", nodeInfo.getNode().address().toString());
             builder.field("host", nodeInfo.getNode().getHostName(), XContentBuilder.FieldCaseConversion.NONE);
             builder.field("ip", nodeInfo.getNode().getHostAddress(), XContentBuilder.FieldCaseConversion.NONE);
@@ -299,7 +299,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         if (!params.param("node_info_format", "default").equals("none")) {
-            builder.field("name", getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
+            builder.field("name", getNode().getName(), XContentBuilder.FieldCaseConversion.NONE);
             builder.field("transport_address", getNode().address().toString(), XContentBuilder.FieldCaseConversion.NONE);
             builder.field("host", getNode().getHostName(), XContentBuilder.FieldCaseConversion.NONE);
             builder.field("ip", getNode().getAddress(), XContentBuilder.FieldCaseConversion.NONE);
@@ -124,7 +124,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
         for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
             DiscoveryNode node = entry.getKey();
             builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
-            builder.field("name", node.name());
+            builder.field("name", node.getName());
             builder.field("transport_address", node.address().toString());
             builder.field("host", node.getHostName());
             builder.field("ip", node.getAddress());
@@ -87,7 +87,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
         builder.startObject(Fields.NODES);
         for (DiscoveryNode node : nodes) {
             builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
-            builder.field(Fields.NAME, node.name(), XContentBuilder.FieldCaseConversion.NONE);
+            builder.field(Fields.NAME, node.getName(), XContentBuilder.FieldCaseConversion.NONE);
             builder.endObject();
         }
         builder.endObject();
@@ -383,7 +383,7 @@ public class TransportClientNodesService extends AbstractComponent {
                 // use discovered information but do keep the original transport address,
                 // so people can control which address is exactly used.
                 DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
-                newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.getId(), nodeWithInfo.getHostName(),
+                newNodes.add(new DiscoveryNode(nodeWithInfo.getName(), nodeWithInfo.getId(), nodeWithInfo.getHostName(),
                     nodeWithInfo.getHostAddress(), listedNode.getAddress(), nodeWithInfo.getAttributes(),
                     nodeWithInfo.getRoles(), nodeWithInfo.getVersion()));
             } else {
@@ -395,7 +395,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
                             ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages) {
         for (NodeStats nodeStats : nodeStatsArray) {
             if (nodeStats.getFs() == null) {
-                logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name());
+                logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().getName());
             } else {
                 FsInfo.Path leastAvailablePath = null;
                 FsInfo.Path mostAvailablePath = null;
@@ -228,18 +228,11 @@ public class DiscoveryNode implements Writeable<DiscoveryNode>, ToXContent {
         return nodeId;
     }
 
-    /**
-     * The name of the node.
-     */
-    public String name() {
-        return this.nodeName;
-    }
-
     /**
      * The name of the node.
      */
     public String getName() {
-        return name();
+        return this.nodeName;
     }
 
     /**
@@ -371,7 +364,7 @@ public class DiscoveryNode implements Writeable<DiscoveryNode>, ToXContent {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(getId(), XContentBuilder.FieldCaseConversion.NONE);
-        builder.field("name", name());
+        builder.field("name", getName());
         builder.field("transport_address", address().toString());
 
         builder.startObject("attributes");
@@ -167,7 +167,7 @@ public class DiscoveryNodeFilters {
             }
         } else if ("_name".equals(attr) || "name".equals(attr)) {
             for (String value : values) {
-                if (Regex.simpleMatch(value, node.name())) {
+                if (Regex.simpleMatch(value, node.getName())) {
                     if (opType == OpType.OR) {
                         return true;
                     }
@@ -361,7 +361,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
             } else {
                 // not a node id, try and search by name
                 for (DiscoveryNode node : this) {
-                    if (Regex.simpleMatch(nodeId, node.name())) {
+                    if (Regex.simpleMatch(nodeId, node.getName())) {
                         resolvedNodesIds.add(node.getId());
                     }
                 }
@@ -519,7 +519,7 @@ public class DiskThresholdDecider extends AllocationDecider {
 
         if (includeRelocations) {
             long relocatingShardsSize = sizeOfRelocatingShards(node, clusterInfo, true, usage.getPath());
-            DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().name(), usage.getPath(),
+            DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(),
                 usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
             if (logger.isTraceEnabled()) {
                 logger.trace("usage without relocations: {}", usage);
@@ -539,7 +539,7 @@ public class DiskThresholdDecider extends AllocationDecider {
     */
    public DiskUsage averageUsage(RoutingNode node, ImmutableOpenMap<String, DiskUsage> usages) {
        if (usages.size() == 0) {
-           return new DiskUsage(node.nodeId(), node.node().name(), "_na_", 0, 0);
+           return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", 0, 0);
        }
        long totalBytes = 0;
        long freeBytes = 0;
@@ -547,7 +547,7 @@ public class DiskThresholdDecider extends AllocationDecider {
            totalBytes += du.value.getTotalBytes();
            freeBytes += du.value.getFreeBytes();
        }
-       return new DiskUsage(node.nodeId(), node.node().name(), "_na_", totalBytes / usages.size(), freeBytes / usages.size());
+       return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", totalBytes / usages.size(), freeBytes / usages.size());
    }
 
    /**
@@ -286,7 +286,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             String replicaSyncId = storeFilesMetaData.syncId();
             // see if we have a sync id we can make use of
             if (replicaSyncId != null && replicaSyncId.equals(primarySyncId)) {
-                logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.name(), replicaSyncId);
+                logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.getName(), replicaSyncId);
                 nodesToSize.put(discoNode, Long.MAX_VALUE);
             } else {
                 long sizeMatched = 0;
@@ -297,7 +297,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                     }
                 }
                 logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data",
-                    shard, discoNode.name(), new ByteSizeValue(sizeMatched), sizeMatched);
+                    shard, discoNode.getName(), new ByteSizeValue(sizeMatched), sizeMatched);
                 nodesToSize.put(discoNode, sizeMatched);
             }
         }
@@ -320,7 +320,7 @@ public class RecoveryState implements ToXContent, Streamable {
             builder.field(Fields.HOST, sourceNode.getHostName());
             builder.field(Fields.TRANSPORT_ADDRESS, sourceNode.address().toString());
             builder.field(Fields.IP, sourceNode.getHostAddress());
-            builder.field(Fields.NAME, sourceNode.name());
+            builder.field(Fields.NAME, sourceNode.getName());
             builder.endObject();
         }
 
@@ -329,7 +329,7 @@ public class RecoveryState implements ToXContent, Streamable {
         builder.field(Fields.HOST, targetNode.getHostName());
         builder.field(Fields.TRANSPORT_ADDRESS, targetNode.address().toString());
         builder.field(Fields.IP, targetNode.getHostAddress());
-        builder.field(Fields.NAME, targetNode.name());
+        builder.field(Fields.NAME, targetNode.getName());
         builder.endObject();
 
         builder.startObject(Fields.INDEX);
@@ -143,7 +143,7 @@ public class RestAllocationAction extends AbstractCatAction {
             table.addCell(diskPercent < 0 ? null : diskPercent);
             table.addCell(node.getHostName());
             table.addCell(node.getHostAddress());
-            table.addCell(node.name());
+            table.addCell(node.getName());
             table.endRow();
         }
 
@@ -105,7 +105,7 @@ public class RestNodeAttrsAction extends AbstractCatAction {
             NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
             for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
                 table.startRow();
-                table.addCell(node.name());
+                table.addCell(node.getName());
                 table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
                 table.addCell(info == null ? null : info.getProcess().getId());
                 table.addCell(node.getHostName());
@@ -288,7 +288,7 @@ public class RestNodesAction extends AbstractCatAction {
             }
             table.addCell(roles);
             table.addCell(masterId == null ? "x" : masterId.equals(node.getId()) ? "*" : "-");
-            table.addCell(node.name());
+            table.addCell(node.getName());
 
             CompletionStats completionStats = indicesStats == null ? null : stats.getIndices().getCompletion();
             table.addCell(completionStats == null ? null : completionStats.getSize());
@@ -99,7 +99,7 @@ public class RestPluginsAction extends AbstractCatAction {
             for (PluginInfo pluginInfo : info.getPlugins().getPluginInfos()) {
                 table.startRow();
                 table.addCell(node.getId());
-                table.addCell(node.name());
+                table.addCell(node.getName());
                 table.addCell(pluginInfo.getName());
                 table.addCell(pluginInfo.getVersion());
                 table.addCell(pluginInfo.getDescription());
@@ -208,10 +208,10 @@ public class RestShardsAction extends AbstractCatAction {
                 String ip = state.getState().nodes().get(shard.currentNodeId()).getHostAddress();
                 String nodeId = shard.currentNodeId();
                 StringBuilder name = new StringBuilder();
-                name.append(state.getState().nodes().get(shard.currentNodeId()).name());
+                name.append(state.getState().nodes().get(shard.currentNodeId()).getName());
                 if (shard.relocating()) {
                     String reloIp = state.getState().nodes().get(shard.relocatingNodeId()).getHostAddress();
-                    String reloNme = state.getState().nodes().get(shard.relocatingNodeId()).name();
+                    String reloNme = state.getState().nodes().get(shard.relocatingNodeId()).getName();
                     String reloNodeId = shard.relocatingNodeId();
                     name.append(" -> ");
                     name.append(reloIp);
@@ -45,7 +45,7 @@ public class ConnectTransportException extends ActionTransportException {
     }
 
     public ConnectTransportException(DiscoveryNode node, String msg, String action, Throwable cause) {
-        super(node == null ? null : node.name(), node == null ? null : node.address(), action, msg, cause);
+        super(node == null ? null : node.getName(), node == null ? null : node.address(), action, msg, cause);
         this.node = node;
     }
 
@@ -30,7 +30,7 @@ import java.io.IOException;
 public class ReceiveTimeoutTransportException extends ActionTransportException {
 
     public ReceiveTimeoutTransportException(DiscoveryNode node, String action, String msg) {
-        super(node.name(), node.address(), action, msg, null);
+        super(node.getName(), node.address(), action, msg, null);
     }
 
     public ReceiveTimeoutTransportException(StreamInput in) throws IOException {
@@ -31,7 +31,7 @@ import java.io.IOException;
 public class SendRequestTransportException extends ActionTransportException implements ElasticsearchWrapperException {
 
     public SendRequestTransportException(DiscoveryNode node, String action, Throwable cause) {
-        super(node == null ? null : node.name(), node == null ? null : node.address(), action, cause);
+        super(node == null ? null : node.getName(), node == null ? null : node.address(), action, cause);
     }
 
     public SendRequestTransportException(StreamInput in) throws IOException {
@@ -873,7 +873,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
         if (t instanceof RemoteTransportException) {
             return (RemoteTransportException) t;
         }
-        return new RemoteTransportException(localNode.name(), localNode.getAddress(), action, t);
+        return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, t);
     }
 
     protected void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) {
@@ -366,7 +366,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
                     // a new node, add it, but also add the tribe name to the attributes
                     Map<String, String> tribeAttr = new HashMap<>(tribe.getAttributes());
                     tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName);
-                    DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.getId(), tribe.getHostName(), tribe.getHostAddress(),
+                    DiscoveryNode discoNode = new DiscoveryNode(tribe.getName(), tribe.getId(), tribe.getHostName(), tribe.getHostAddress(),
                         tribe.address(), unmodifiableMap(tribeAttr), tribe.getRoles(), tribe.version());
                     clusterStateChanged = true;
                     logger.info("[{}] adding node [{}]", tribeName, discoNode);
@@ -188,7 +188,7 @@ public class TasksIT extends ESIntegTestCase {
 
         logger.debug("number of shards, total: [{}], primaries: [{}] ", numberOfShards.totalNumShards, numberOfShards.numPrimaries);
         logger.debug("main events {}", numberOfEvents(RefreshAction.NAME, Tuple::v1));
-        logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).getNode().name());
+        logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).getNode().getName());
         logger.debug("[s] events {}", numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1));
         logger.debug("[s][*] events {}", numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
         logger.debug("nodes with the index {}", internalCluster().nodesInclude("test"));
@@ -436,7 +436,7 @@ public class TasksIT extends ESIntegTestCase {
         DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode();
         RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node, Strings.splitStringToArray(actionMasks, ','));
         ((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener);
-        RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.name(), actionMasks), listener);
+        RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.getName(), actionMasks), listener);
         assertNull(oldListener);
     }
 }
@@ -192,7 +192,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
         for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStatus : shardStatuses) {
             for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
                 if (corruptedShardIDMap.containsKey(shardStatus.key)
-                        && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().name())) {
+                        && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().getName())) {
                     assertThat(status.getLegacyVersion(), greaterThanOrEqualTo(0L));
                     assertThat(status.getStoreException(), notNullValue());
                 } else {
@@ -38,7 +38,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.termvectors.TermVectorsResponse;
 import org.elasticsearch.action.update.UpdateRequestBuilder;
 import org.elasticsearch.action.update.UpdateResponse;
@@ -259,7 +258,7 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
         for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
             for (ShardRouting shardRouting : indexShardRoutingTable) {
                 if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndexName())) {
-                    String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+                    String name = clusterState.nodes().get(shardRouting.currentNodeId()).getName();
                     assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
                 }
             }
@@ -53,8 +53,8 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
         }
         logger.info("--> start master node");
         final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
-        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
-        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(masterNodeName));
+        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(masterNodeName));
 
         logger.info("--> stop master node");
         internalCluster().stopCurrentMasterNode();
@@ -68,8 +68,8 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
 
         logger.info("--> start master node");
         final String nextMasterEligibleNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
-        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
-        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(nextMasterEligibleNodeName));
+        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(nextMasterEligibleNodeName));
     }
 
     public void testElectOnlyBetweenMasterNodes() throws IOException {
@@ -83,19 +83,19 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
         }
         logger.info("--> start master node (1)");
         final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
-        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
-        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(masterNodeName));
+        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(masterNodeName));
 
         logger.info("--> start master node (2)");
         final String nextMasterEligableNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
-        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
-        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
-        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(masterNodeName));
+        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(masterNodeName));
+        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(masterNodeName));
 
         logger.info("--> closing master node (1)");
         internalCluster().stopCurrentMasterNode();
-        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
-        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
+        assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(nextMasterEligableNodeName));
+        assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().getName(), equalTo(nextMasterEligableNodeName));
     }
 
     /**
@@ -92,7 +92,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
                 }
             }
         }
@@ -141,7 +141,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
                 }
             }
         }
@@ -175,7 +175,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
                 }
             }
         }
@@ -197,7 +197,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
                 }
             }
         }
@@ -219,7 +219,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
                 }
             }
         }
@@ -240,7 +240,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
                 }
             }
         }
@@ -71,7 +71,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 for (ShardRouting shardRouting : indexShardRoutingTable) {
-                    assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_0));
+                    assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), equalTo(node_0));
                 }
             }
         }
@@ -105,7 +105,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
         int numShardsOnNode1 = 0;
         for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
             for (ShardRouting shardRouting : indexShardRoutingTable) {
-                if ("node1".equals(clusterState.nodes().get(shardRouting.currentNodeId()).name())) {
+                if ("node1".equals(clusterState.nodes().get(shardRouting.currentNodeId()).getName())) {
                     numShardsOnNode1++;
                 }
             }
@@ -129,7 +129,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
         indexRoutingTable = clusterState.routingTable().index("test");
         for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
             for (ShardRouting shardRouting : indexShardRoutingTable) {
-                assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_1));
+                assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), equalTo(node_1));
             }
         }
 
@@ -41,7 +41,7 @@ public class DiscoveryNodesTests extends ESTestCase {
         DiscoveryNodes discoveryNodes = buildDiscoveryNodes();
         DiscoveryNode[] nodes = discoveryNodes.nodes().values().toArray(DiscoveryNode.class);
         DiscoveryNode node = randomFrom(nodes);
-        DiscoveryNode resolvedNode = discoveryNodes.resolveNode(randomBoolean() ? node.getId() : node.name());
+        DiscoveryNode resolvedNode = discoveryNodes.resolveNode(randomBoolean() ? node.getId() : node.getName());
         assertThat(resolvedNode.getId(), equalTo(node.getId()));
     }
 
@@ -87,7 +87,7 @@ public class DiscoveryNodesTests extends ESTestCase {
         DiscoveryNode[] nodes = discoveryNodes.nodes().values().toArray(DiscoveryNode.class);
         for (int i = 0; i < numNodeNames; i++) {
             DiscoveryNode discoveryNode = randomFrom(nodes);
-            nodeSelectors.add(discoveryNode.name());
+            nodeSelectors.add(discoveryNode.getName());
             expectedNodeIdsSet.add(discoveryNode.getId());
         }
 
@@ -29,7 +29,6 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.disruption.NetworkDisconnectPartition;
@@ -78,11 +77,11 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
         final String primaryNode;
         final String replicaNode;
         if (shards.get(0).primary()) {
-            primaryNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name();
-            replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name();
+            primaryNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().getName();
+            replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().getName();
         } else {
-            primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name();
-            replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name();
+            primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().getName();
+            replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().getName();
         }
 
         NetworkDisconnectPartition partition = new NetworkDisconnectPartition(
@@ -42,7 +42,7 @@ public class ShardStateIT extends ESIntegTestCase {
         ClusterState state = client().admin().cluster().prepareState().get().getState();
         final int shard = randomBoolean() ? 0 : 1;
         final String nodeId = state.routingTable().index("test").shard(shard).primaryShard().currentNodeId();
-        final String node = state.nodes().get(nodeId).name();
+        final String node = state.nodes().get(nodeId).getName();
         logger.info("--> failing primary of [{}] on node [{}]", shard, node);
         IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
         indicesService.indexService(resolveIndex("test")).getShard(shard).failShard("simulated test failure", null);
@@ -572,7 +572,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
 
         // now that we started node1 again, a new master should be elected
         assertThat(clusterService2.state().nodes().masterNode(), is(notNullValue()));
-        if (node_2.equals(clusterService2.state().nodes().masterNode().name())) {
+        if (node_2.equals(clusterService2.state().nodes().masterNode().getName())) {
             assertThat(testService1.master(), is(false));
             assertThat(testService2.master(), is(true));
         } else {
@@ -626,8 +626,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                 DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
                 if (!Objects.equals(previousMaster, currentMaster)) {
                     logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), event.previousState());
-                    String previousMasterNodeName = previousMaster != null ? previousMaster.name() : null;
-                    String currentMasterNodeName = currentMaster != null ? currentMaster.name() : null;
+                    String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null;
+                    String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
                     masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
                 }
             }
@@ -1192,7 +1192,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         ClusterState state = getNodeClusterState(node);
         String masterNode = null;
         if (state.nodes().masterNode() != null) {
-            masterNode = state.nodes().masterNode().name();
+            masterNode = state.nodes().masterNode().getName();
         }
         logger.trace("[{}] master is [{}]", node, state.nodes().masterNode());
         assertThat("node [" + node + "] still has [" + masterNode + "] as master",
@@ -1206,7 +1206,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
             ClusterState state = getNodeClusterState(node);
             String failMsgSuffix = "cluster_state:\n" + state.prettyPrint();
             assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().size(), equalTo(nodes.size()));
-            String otherMasterNodeName = state.nodes().masterNode() != null ? state.nodes().masterNode().name() : null;
+            String otherMasterNodeName = state.nodes().masterNode() != null ? state.nodes().masterNode().getName() : null;
             assertThat("wrong master on node [" + node + "]. " + failMsgSuffix, otherMasterNodeName, equalTo(masterNode));
         }
     }
@@ -186,7 +186,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
         ClusterState state = internalCluster().getInstance(ClusterService.class).state();
         DiscoveryNode node = null;
         for (DiscoveryNode discoveryNode : state.nodes()) {
-            if (discoveryNode.name().equals(noneMasterNode)) {
+            if (discoveryNode.getName().equals(noneMasterNode)) {
                 node = discoveryNode;
             }
         }
@@ -118,7 +118,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
             @Override
             public void onNewClusterState(String reason) {
                 ClusterState newClusterState = action.pendingStatesQueue().getNextClusterStateToProcess();
-                logger.debug("[{}] received version [{}], uuid [{}]", discoveryNode.name(), newClusterState.version(), newClusterState.stateUUID());
+                logger.debug("[{}] received version [{}], uuid [{}]", discoveryNode.getName(), newClusterState.version(), newClusterState.stateUUID());
                 if (listener != null) {
                     ClusterChangedEvent event = new ClusterChangedEvent("", newClusterState, clusterState);
                     listener.clusterChanged(event);
@@ -457,7 +457,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
             }
             if (!recoveryState.getPrimary() && (useSyncIds == false)) {
                 logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}",
-                    recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
+                    recoveryState.getShardId().getId(), recoveryState.getSourceNode().getName(), recoveryState.getTargetNode().getName(),
                     recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
                 assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered));
                 assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0L));
@@ -469,7 +469,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
             } else {
                 if (useSyncIds && !recoveryState.getPrimary()) {
                     logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}",
-                        recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
+                        recoveryState.getShardId().getId(), recoveryState.getSourceNode().getName(), recoveryState.getTargetNode().getName(),
                         recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
                 }
                 assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0L));
@@ -124,7 +124,7 @@ public class ReusePeerRecoverySharedTest {
             }
             if (!recoveryState.getPrimary() && (useSyncIds == false)) {
                 logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", recoveryState.getShardId().getId(),
-                    recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
+                    recoveryState.getSourceNode().getName(), recoveryState.getTargetNode().getName(),
                     recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
                 assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered));
                 assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0L));
@@ -139,7 +139,7 @@ public class ReusePeerRecoverySharedTest {
             } else {
                 if (useSyncIds && !recoveryState.getPrimary()) {
                     logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}",
-                        recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
+                        recoveryState.getShardId().getId(), recoveryState.getSourceNode().getName(), recoveryState.getTargetNode().getName(),
                         recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
                 }
                 assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0L));
@@ -331,7 +331,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
-                .put("index.routing.allocation.include._name", primariesNode.getNode().name())
+                .put("index.routing.allocation.include._name", primariesNode.getNode().getName())
                 .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
 
         ));
@@ -339,8 +339,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
         final AtomicBoolean corrupt = new AtomicBoolean(true);
         final CountDownLatch hasCorrupted = new CountDownLatch(1);
         for (NodeStats dataNode : dataNodeStats) {
-            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
-            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().name()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
+            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
 
                 @Override
                 public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
@@ -358,7 +358,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
 
         Settings build = Settings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
-                .put("index.routing.allocation.include._name", primariesNode.getNode().name() + "," + unluckyNode.getNode().name()).build();
+                .put("index.routing.allocation.include._name", primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()).build();
         client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
         client().admin().cluster().prepareReroute().get();
         hasCorrupted.await();
@@ -395,7 +395,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 4)) // don't go crazy here it must recovery fast
                 // This does corrupt files on the replica, so we can't check:
                 .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
-                .put("index.routing.allocation.include._name", primariesNode.getNode().name())
+                .put("index.routing.allocation.include._name", primariesNode.getNode().getName())
                 .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
         ));
         ensureGreen();
@@ -411,8 +411,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
         assertHitCount(countResponse, numDocs);
         final boolean truncate = randomBoolean();
         for (NodeStats dataNode : dataNodeStats) {
-            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
-            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().name()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
+            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
 
                 @Override
                 public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
@@ -81,8 +81,8 @@ public class ExceptionRetryIT extends ESIntegTestCase {
 
        //create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry.
        for (NodeStats dataNode : nodeStats.getNodes()) {
-           MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
-           mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().name()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+           MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
+           mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
 
               @Override
               public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
@@ -122,7 +122,7 @@ public class FlushIT extends ESIntegTestCase {
         String newNodeName = internalCluster().startNode();
         ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
         ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next();
-        String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).name();
+        String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).getName();
         assertFalse(currentNodeName.equals(newNodeName));
         internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName)).get();
 
@@ -297,12 +297,12 @@ public class IndexRecoveryIT extends ESIntegTestCase {
         long nodeBThrottling = Long.MAX_VALUE;
         for (NodeStats nodeStats : statsResponse.getNodes()) {
             final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
-            if (nodeStats.getNode().name().equals(nodeA)) {
+            if (nodeStats.getNode().getName().equals(nodeA)) {
                 assertThat("node A should have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(1));
                 assertThat("node A should not have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(0));
                 nodeAThrottling = recoveryStats.throttleTime().millis();
             }
-            if (nodeStats.getNode().name().equals(nodeB)) {
+            if (nodeStats.getNode().getName().equals(nodeB)) {
                 assertThat("node B should not have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(0));
                 assertThat("node B should have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(1));
                 nodeBThrottling = recoveryStats.throttleTime().millis();
@@ -319,10 +319,10 @@ public class IndexRecoveryIT extends ESIntegTestCase {
         assertThat(statsResponse.getNodes(), arrayWithSize(2));
         for (NodeStats nodeStats : statsResponse.getNodes()) {
             final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
-            if (nodeStats.getNode().name().equals(nodeA)) {
+            if (nodeStats.getNode().getName().equals(nodeA)) {
                 assertThat("node A throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeAThrottling));
             }
-            if (nodeStats.getNode().name().equals(nodeB)) {
+            if (nodeStats.getNode().getName().equals(nodeB)) {
                 assertThat("node B throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeBThrottling));
             }
         }
@@ -350,10 +350,10 @@ public class IndexRecoveryIT extends ESIntegTestCase {
             final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
             assertThat(recoveryStats.currentAsSource(), equalTo(0));
             assertThat(recoveryStats.currentAsTarget(), equalTo(0));
-            if (nodeStats.getNode().name().equals(nodeA)) {
+            if (nodeStats.getNode().getName().equals(nodeA)) {
                 assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0L));
             }
-            if (nodeStats.getNode().name().equals(nodeB)) {
+            if (nodeStats.getNode().getName().equals(nodeB)) {
                 assertThat("node B throttling should be >0 ", recoveryStats.throttleTime().millis(), greaterThan(0L));
             }
         }
@@ -369,10 +369,10 @@ public class IndexRecoveryIT extends ESIntegTestCase {
             final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
             assertThat(recoveryStats.currentAsSource(), equalTo(0));
             assertThat(recoveryStats.currentAsTarget(), equalTo(0));
-            if (nodeStats.getNode().name().equals(nodeA)) {
+            if (nodeStats.getNode().getName().equals(nodeA)) {
                 assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0L));
             }
-            if (nodeStats.getNode().name().equals(nodeB)) {
+            if (nodeStats.getNode().getName().equals(nodeB)) {
                 assertThat("node B throttling should be >0 ", recoveryStats.throttleTime().millis(), greaterThan(0L));
             }
         }
@@ -234,7 +234,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
 
         // Check routing tables
         ClusterState state = client().admin().cluster().prepareState().get().getState();
-        assertEquals(master, state.nodes().masterNode().name());
+        assertEquals(master, state.nodes().masterNode().getName());
         List<ShardRouting> shards = state.routingTable().allShards("index");
         assertThat(shards, hasSize(1));
         for (ShardRouting shard : shards) {
@@ -352,7 +352,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
 
         // Check routing tables
         ClusterState state = client().admin().cluster().prepareState().get().getState();
-        assertEquals(master, state.nodes().masterNode().name());
+        assertEquals(master, state.nodes().masterNode().getName());
         List<ShardRouting> shards = state.routingTable().allShards("index");
         assertThat(shards, hasSize(2));
         for (ShardRouting shard : shards) {
@@ -94,7 +94,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
         assertAcked(prepareCreate("test")
                 .addMapping("type1", "field1", "type=text", "the_id", "type=text")
                 .setSettings(settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards())
-                        .put("index.routing.allocation.include._name", primariesNode.getNode().name()))); // only allocate on the lucky node
+                        .put("index.routing.allocation.include._name", primariesNode.getNode().getName()))); // only allocate on the lucky node
 
         // index some docs and check if they are coming back
         int numDocs = randomIntBetween(100, 200);
@@ -116,8 +116,8 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
         final CountDownLatch latch = new CountDownLatch(1);
         final AtomicBoolean truncate = new AtomicBoolean(true);
         for (NodeStats dataNode : dataNodeStats) {
-            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
-            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().name()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
+            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
 
                 @Override
                 public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
@@ -138,7 +138,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
         client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                 .put("index.routing.allocation.include._name", // now allow allocation on all nodes
-                        primariesNode.getNode().name() + "," + unluckyNode.getNode().name())).get();
+                        primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName())).get();
 
         latch.await();
 
@@ -103,6 +103,6 @@ public class GeoShapeIntegrationTests extends ESIntegTestCase {
         ClusterState state = client().admin().cluster().prepareState().get().getState();
         IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0);
         String nodeId = shard.assignedShards().get(0).currentNodeId();
-        return state.getNodes().get(nodeId).name();
+        return state.getNodes().get(nodeId).getName();
     }
 }
@@ -231,7 +231,7 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
         for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
             for (ShardRouting shardRouting : indexShardRoutingTable) {
                 if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndexName())) {
-                    String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+                    String name = clusterState.nodes().get(shardRouting.currentNodeId()).getName();
                     assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
                 }
             }
@@ -101,7 +101,6 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexWarmer;
 import org.elasticsearch.index.MergePolicyConfig;
 import org.elasticsearch.index.MergeSchedulerConfig;
 import org.elasticsearch.index.MockEngineFactoryPlugin;
@@ -111,7 +110,6 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.IndicesRequestCache;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.store.IndicesStore;
 import org.elasticsearch.node.NodeMocksPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -1889,7 +1887,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
         for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
             for (ShardRouting shardRouting : indexShardRoutingTable) {
                 if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndexName())) {
-                    String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+                    String name = clusterState.nodes().get(shardRouting.currentNodeId()).getName();
                     nodes.add(name);
                     assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
                 }
@@ -194,7 +194,7 @@ final class ExternalNode implements Closeable {
 
         Settings clientSettings = settingsBuilder().put(externalNodeSettings)
                 .put("client.transport.nodes_sampler_interval", "1s")
-                .put("node.name", "transport_client_" + nodeInfo.getNode().name())
+                .put("node.name", "transport_client_" + nodeInfo.getNode().getName())
                 .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName).put("client.transport.sniff", false).build();
         TransportClient client = TransportClient.builder().settings(clientSettings).build();
         client.addTransportAddress(addr);
@@ -1363,7 +1363,7 @@ public final class InternalTestCluster extends TestCluster {
         try {
             Client client = viaNode != null ? client(viaNode) : client();
             ClusterState state = client.admin().cluster().prepareState().execute().actionGet().getState();
-            return state.nodes().masterNode().name();
+            return state.nodes().masterNode().getName();
         } catch (Throwable e) {
             logger.warn("Can't fetch cluster state", e);
             throw new RuntimeException("Can't get master node " + e.getMessage(), e);