svn merge -c 1334043. FIXES: MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState (Bikas Saha via bobby)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1334045 13f79535-47bb-0310-9956-ffa450edef68
Robert Joseph Evans 2012-05-04 15:48:13 +00:00
parent 13acee63d3
commit 95a0505c60
17 changed files with 102 additions and 149 deletions

View File

@@ -172,6 +172,9 @@ Release 2.0.0 - UNRELEASED
    MAPREDUCE-3173. MRV2 UI doesn't work properly without internet (Devaraj K
    via bobby)
+   MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState
+   (Bikas Saha via bobby)
Release 0.23.3 - UNRELEASED
  INCOMPATIBLE CHANGES

View File

@@ -18,8 +18,6 @@
package org.apache.hadoop.yarn.api.records;
-// TODO NodeState is a clone of RMNodeState made for MR-3353. In a subsequent
-// patch RMNodeState should be replaced with NodeState
/**
 * <p>State of a <code>Node</code>.</p>
 */

View File

@@ -56,7 +56,6 @@
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -302,7 +301,7 @@ public AllocateResponse allocate(AllocateRequest request)
        numContainers = schedulerNodeReport.getNumContainers();
      }
      NodeReport report = BuilderUtils.newNodeReport(rmNode.getNodeID(),
-         RMNodeState.toNodeState(rmNode.getState()),
+         rmNode.getState(),
          rmNode.getHttpAddress(), rmNode.getRackName(), used,
          rmNode.getTotalCapability(), numContainers,
          rmNode.getNodeHealthStatus());

View File

@@ -79,7 +79,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
@@ -414,7 +413,7 @@ private NodeReport createNodeReports(RMNode rmNode) {
    }
    NodeReport report = BuilderUtils.newNodeReport(rmNode.getNodeID(),
-       RMNodeState.toNodeState(rmNode.getState()),
+       rmNode.getState(),
        rmNode.getHttpAddress(), rmNode.getRackName(), used,
        rmNode.getTotalCapability(), numContainers,
        rmNode.getNodeHealthStatus());

View File

@@ -39,6 +39,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
@@ -56,7 +57,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanAppEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
@@ -498,7 +498,7 @@ private void createNewAttempt() {
  }
  private void processNodeUpdate(RMAppNodeUpdateType type, RMNode node) {
-   RMNodeState nodeState = node.getState();
+   NodeState nodeState = node.getState();
    updatedNodes.add(node);
    LOG.debug("Received node update event:" + type + " for node:" + node
        + " with state:" + nodeState);

View File

@@ -23,10 +23,10 @@
import org.apache.hadoop.net.Node;
import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
/**
@@ -99,7 +99,7 @@ public interface RMNode {
   */
  public Node getNode();
- public RMNodeState getState();
+ public NodeState getState();
  public List<ContainerId> getContainersToCleanUp();

View File

@@ -40,6 +40,7 @@
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -104,53 +105,53 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
      .newRecordInstance(HeartbeatResponse.class);
  private static final StateMachineFactory<RMNodeImpl,
-                                          RMNodeState,
+                                          NodeState,
                                           RMNodeEventType,
                                           RMNodeEvent> stateMachineFactory
                 = new StateMachineFactory<RMNodeImpl,
-                                          RMNodeState,
+                                          NodeState,
                                           RMNodeEventType,
-                                          RMNodeEvent>(RMNodeState.NEW)
+                                          RMNodeEvent>(NodeState.NEW)
     //Transitions from NEW state
-    .addTransition(RMNodeState.NEW, RMNodeState.RUNNING,
+    .addTransition(NodeState.NEW, NodeState.RUNNING,
        RMNodeEventType.STARTED, new AddNodeTransition())
     //Transitions from RUNNING state
-    .addTransition(RMNodeState.RUNNING,
-        EnumSet.of(RMNodeState.RUNNING, RMNodeState.UNHEALTHY),
+    .addTransition(NodeState.RUNNING,
+        EnumSet.of(NodeState.RUNNING, NodeState.UNHEALTHY),
        RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenHealthyTransition())
-    .addTransition(RMNodeState.RUNNING, RMNodeState.DECOMMISSIONED,
+    .addTransition(NodeState.RUNNING, NodeState.DECOMMISSIONED,
        RMNodeEventType.DECOMMISSION,
-        new DeactivateNodeTransition(RMNodeState.DECOMMISSIONED))
-    .addTransition(RMNodeState.RUNNING, RMNodeState.LOST,
+        new DeactivateNodeTransition(NodeState.DECOMMISSIONED))
+    .addTransition(NodeState.RUNNING, NodeState.LOST,
        RMNodeEventType.EXPIRE,
-        new DeactivateNodeTransition(RMNodeState.LOST))
-    .addTransition(RMNodeState.RUNNING, RMNodeState.REBOOTED,
+        new DeactivateNodeTransition(NodeState.LOST))
+    .addTransition(NodeState.RUNNING, NodeState.REBOOTED,
        RMNodeEventType.REBOOTING,
-        new DeactivateNodeTransition(RMNodeState.REBOOTED))
-    .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING,
+        new DeactivateNodeTransition(NodeState.REBOOTED))
+    .addTransition(NodeState.RUNNING, NodeState.RUNNING,
        RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition())
-    .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING,
+    .addTransition(NodeState.RUNNING, NodeState.RUNNING,
        RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition())
-    .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING,
+    .addTransition(NodeState.RUNNING, NodeState.RUNNING,
        RMNodeEventType.RECONNECTED, new ReconnectNodeTransition())
     //Transitions from UNHEALTHY state
-    .addTransition(RMNodeState.UNHEALTHY,
-        EnumSet.of(RMNodeState.UNHEALTHY, RMNodeState.RUNNING),
+    .addTransition(NodeState.UNHEALTHY,
+        EnumSet.of(NodeState.UNHEALTHY, NodeState.RUNNING),
        RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenUnHealthyTransition())
-    .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY,
+    .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
        RMNodeEventType.RECONNECTED, new ReconnectNodeTransition())
-    .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY,
+    .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
        RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition())
-    .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY,
+    .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
        RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition())
     // create the topology tables
     .installTopology();
- private final StateMachine<RMNodeState, RMNodeEventType,
+ private final StateMachine<NodeState, RMNodeEventType,
                             RMNodeEvent> stateMachine;
  public RMNodeImpl(NodeId nodeId, RMContext context, String hostName,
@@ -252,7 +253,7 @@ private void setNodeHealthStatus(NodeHealthStatus status)
  }
  @Override
- public RMNodeState getState() {
+ public NodeState getState() {
    this.readLock.lock();
    try {
@@ -302,7 +303,7 @@ public void handle(RMNodeEvent event) {
    LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
    try {
      writeLock.lock();
-     RMNodeState oldState = getState();
+     NodeState oldState = getState();
      try {
        stateMachine.doTransition(event.getType(), event);
      } catch (InvalidStateTransitonException e) {
@@ -321,7 +322,7 @@ public void handle(RMNodeEvent event) {
    }
  }
- private void updateMetricsForRejoinedNode(RMNodeState previousNodeState) {
+ private void updateMetricsForRejoinedNode(NodeState previousNodeState) {
    ClusterMetrics metrics = ClusterMetrics.getMetrics();
    metrics.incrNumActiveNodes();
@@ -341,7 +342,7 @@ private void updateMetricsForRejoinedNode(RMNodeState previousNodeState) {
    }
  }
- private void updateMetricsForDeactivatedNode(RMNodeState finalState) {
+ private void updateMetricsForDeactivatedNode(NodeState finalState) {
    ClusterMetrics metrics = ClusterMetrics.getMetrics();
    metrics.decrNumActiveNodes();
@@ -440,8 +441,8 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
  public static class DeactivateNodeTransition
    implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
-   private final RMNodeState finalState;
-   public DeactivateNodeTransition(RMNodeState finalState) {
+   private final NodeState finalState;
+   public DeactivateNodeTransition(NodeState finalState) {
      this.finalState = finalState;
    }
@@ -466,9 +467,9 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
  }
  public static class StatusUpdateWhenHealthyTransition implements
-     MultipleArcTransition<RMNodeImpl, RMNodeEvent, RMNodeState> {
+     MultipleArcTransition<RMNodeImpl, RMNodeEvent, NodeState> {
    @Override
-   public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
+   public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
      RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
@@ -486,8 +487,8 @@ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
            new NodesListManagerEvent(
                NodesListManagerEventType.NODE_UNUSABLE, rmNode));
        // Update metrics
-       rmNode.updateMetricsForDeactivatedNode(RMNodeState.UNHEALTHY);
-       return RMNodeState.UNHEALTHY;
+       rmNode.updateMetricsForDeactivatedNode(NodeState.UNHEALTHY);
+       return NodeState.UNHEALTHY;
      }
      // Filter the map to only obtain just launched containers and finished
@@ -541,15 +542,15 @@ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
      rmNode.containersToClean.clear();
      rmNode.finishedApplications.clear();
-     return RMNodeState.RUNNING;
+     return NodeState.RUNNING;
    }
  }
  public static class StatusUpdateWhenUnHealthyTransition implements
-     MultipleArcTransition<RMNodeImpl, RMNodeEvent, RMNodeState> {
+     MultipleArcTransition<RMNodeImpl, RMNodeEvent, NodeState> {
    @Override
-   public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
+   public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
      RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
      // Switch the last heartbeatresponse.
@@ -566,11 +567,11 @@ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
        // notifiers get update metadata because they will very likely query it
        // upon notification
        // Update metrics
-       rmNode.updateMetricsForRejoinedNode(RMNodeState.UNHEALTHY);
-       return RMNodeState.RUNNING;
+       rmNode.updateMetricsForRejoinedNode(NodeState.UNHEALTHY);
+       return NodeState.RUNNING;
      }
-     return RMNodeState.UNHEALTHY;
+     return NodeState.UNHEALTHY;
    }
  }
}

View File

@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
-import org.apache.hadoop.yarn.api.records.NodeState;
-//TODO yarn.api.records.NodeState is a clone of RMNodeState made for MR-3353.
-// In a subsequent patch RMNodeState should be replaced with NodeState
-public enum RMNodeState {
-  NEW, RUNNING, UNHEALTHY, DECOMMISSIONED, LOST, REBOOTED;
-  public static NodeState toNodeState(RMNodeState state) {
-    switch(state) {
-    case NEW:
-      return NodeState.NEW;
-    case RUNNING:
-      return NodeState.RUNNING;
-    case UNHEALTHY:
-      return NodeState.UNHEALTHY;
-    case DECOMMISSIONED:
-      return NodeState.DECOMMISSIONED;
-    case LOST:
-      return NodeState.LOST;
-    case REBOOTED:
-      return NodeState.REBOOTED;
-    }
-    return null;
-  }
-};

View File

@@ -27,10 +27,10 @@
import java.util.Collection;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.util.Times;
@@ -78,9 +78,9 @@ protected void render(Block html) {
          th(".mem", "Mem Avail").
          _()._().
        tbody();
-     RMNodeState stateFilter = null;
+     NodeState stateFilter = null;
      if(type != null && !type.isEmpty()) {
-       stateFilter = RMNodeState.valueOf(type.toUpperCase());
+       stateFilter = NodeState.valueOf(type.toUpperCase());
      }
      Collection<RMNode> rmNodes = this.rmContext.getRMNodes().values();
      boolean isInactive = false;
@@ -96,14 +96,14 @@ protected void render(Block html) {
      }
      for (RMNode ni : rmNodes) {
        if(stateFilter != null) {
-         RMNodeState state = ni.getState();
+         NodeState state = ni.getState();
          if(!stateFilter.equals(state)) {
            continue;
          }
        } else {
          // No filter. User is asking for all nodes. Make sure you skip the
          // unhealthy nodes.
-         if (ni.getState() == RMNodeState.UNHEALTHY) {
+         if (ni.getState() == NodeState.UNHEALTHY) {
            continue;
          }
        }

View File

@@ -39,13 +39,13 @@
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
@@ -162,7 +162,7 @@ public NodesInfo getNodes(@QueryParam("state") String filterState,
    Collection<RMNode> rmNodes = this.rm.getRMContext().getRMNodes().values();
    boolean isInactive = false;
    if (filterState != null && !filterState.isEmpty()) {
-     RMNodeState nodeState = RMNodeState.valueOf(filterState.toUpperCase());
+     NodeState nodeState = NodeState.valueOf(filterState.toUpperCase());
      switch (nodeState) {
      case DECOMMISSIONED:
      case LOST:
@@ -182,7 +182,7 @@ public NodesInfo getNodes(@QueryParam("state") String filterState,
      } else {
        // No filter. User is asking for all nodes. Make sure you skip the
        // unhealthy nodes.
-       if (ni.getState() == RMNodeState.UNHEALTHY) {
+       if (ni.getState() == NodeState.UNHEALTHY) {
          continue;
        }
      }

View File

@@ -25,8 +25,8 @@
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
@@ -35,7 +35,7 @@
public class NodeInfo {
  protected String rack;
- protected RMNodeState state;
+ protected NodeState state;
  protected String id;
  protected String nodeHostName;
  protected String nodeHTTPAddress;

View File

@@ -26,11 +26,11 @@
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import com.google.common.collect.Lists;
@@ -48,7 +48,7 @@ public static List<RMNode> newNodes(int racks, int nodesPerRack,
      for (int j = 0; j < nodesPerRack; ++j) {
        if (j == (nodesPerRack - 1)) {
          // One unhealthy node per rack.
-         list.add(nodeInfo(i, perNode, RMNodeState.UNHEALTHY));
+         list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
        }
        list.add(newNodeInfo(i, perNode));
      }
@@ -61,7 +61,7 @@ public static List<RMNode> deactivatedNodes(int racks, int nodesPerRack,
    List<RMNode> list = Lists.newArrayList();
    for (int i = 0; i < racks; ++i) {
      for (int j = 0; j < nodesPerRack; ++j) {
-       RMNodeState[] allStates = RMNodeState.values();
+       NodeState[] allStates = NodeState.values();
        list.add(nodeInfo(i, perNode, allStates[j % allStates.length]));
      }
    }
@@ -102,11 +102,11 @@ private static class MockRMNodeImpl implements RMNode {
    private Resource perNode;
    private String rackName;
    private NodeHealthStatus nodeHealthStatus;
-   private RMNodeState state;
+   private NodeState state;
    public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
        Resource perNode, String rackName, NodeHealthStatus nodeHealthStatus,
-       int cmdPort, String hostName, RMNodeState state) {
+       int cmdPort, String hostName, NodeState state) {
      this.nodeId = nodeId;
      this.nodeAddr = nodeAddr;
      this.httpAddress = httpAddress;
@@ -169,7 +169,7 @@ public Node getNode() {
    }
    @Override
-   public RMNodeState getState() {
+   public NodeState getState() {
      return this.state;
    }
@@ -189,11 +189,11 @@ public HeartbeatResponse getLastHeartBeatResponse() {
    }
  };
- private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr) {
+ private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) {
    return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++);
  }
- private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr, int hostnum) {
+ private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr, int hostnum) {
    final String rackName = "rack"+ rack;
    final int nid = hostnum;
    final String hostName = "host"+ nid;
@@ -202,7 +202,7 @@ private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState
    final String httpAddress = httpAddr;
    final NodeHealthStatus nodeHealthStatus =
        recordFactory.newRecordInstance(NodeHealthStatus.class);
-   if (state != RMNodeState.UNHEALTHY) {
+   if (state != NodeState.UNHEALTHY) {
      nodeHealthStatus.setIsNodeHealthy(true);
      nodeHealthStatus.setHealthReport("HealthyMe");
    }
@@ -211,12 +211,12 @@ private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState
  }
  public static RMNode nodeInfo(int rack, final Resource perNode,
-     RMNodeState state) {
+     NodeState state) {
    return buildRMNode(rack, perNode, state, "N/A");
  }
  public static RMNode newNodeInfo(int rack, final Resource perNode) {
-   return buildRMNode(rack, perNode, RMNodeState.RUNNING, "localhost:0");
+   return buildRMNode(rack, perNode, NodeState.RUNNING, "localhost:0");
  }
  public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) {

View File

@@ -35,6 +35,7 @@
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
@@ -48,7 +49,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@@ -146,7 +146,7 @@ public void sendNodeLost(MockNM nm) throws Exception {
    node.handle(new RMNodeEvent(nm.getNodeId(), RMNodeEventType.EXPIRE));
  }
- public void NMwaitForState(NodeId nodeid, RMNodeState finalState)
+ public void NMwaitForState(NodeId nodeid, NodeState finalState)
      throws Exception {
    RMNode node = getRMContext().getRMNodes().get(nodeid);
    Assert.assertNotNull("node shouldn't be null", node);

View File

@@ -33,7 +33,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.junit.After;
@@ -83,7 +82,7 @@ private void syncNodeHeartbeat(MockNM nm, boolean health) throws Exception {
  private void syncNodeLost(MockNM nm) throws Exception {
    rm.sendNodeStarted(nm);
-   rm.NMwaitForState(nm.getNodeId(), RMNodeState.RUNNING);
+   rm.NMwaitForState(nm.getNodeId(), NodeState.RUNNING);
    rm.sendNodeLost(nm);
    dispatcher.await();
  }

View File

@@ -20,9 +20,9 @@
import java.io.IOException;
import java.io.PrintWriter;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodesPage.NodesBlock;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Before;
@@ -44,7 +44,7 @@ public class TestNodesPage {
  // The following is because of the way TestRMWebApp.mockRMContext creates
  // nodes.
  final int numberOfLostNodesPerRack = numberOfNodesPerRack
-     / RMNodeState.values().length;
+     / NodeState.values().length;
  // Number of Actual Table Headers for NodesPage.NodesBlock might change in
  // future. In that case this value should be adjusted to the new value.

View File

@@ -31,6 +31,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -41,7 +42,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@@ -124,13 +124,13 @@ public void configure(Binder binder) {
    // Unhealthy nodes
    instance.moreParams().put(YarnWebParams.NODE_STATE,
-       RMNodeState.UNHEALTHY.toString());
+       NodeState.UNHEALTHY.toString());
    instance.render();
    WebAppTests.flushOutput(injector);
    // Lost nodes
    instance.moreParams().put(YarnWebParams.NODE_STATE,
-       RMNodeState.LOST.toString());
+       NodeState.LOST.toString());
    instance.render();
    WebAppTests.flushOutput(injector);

View File

@@ -32,13 +32,13 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
@@ -54,7 +54,6 @@
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
@@ -131,15 +130,15 @@ public void testNodesDefaultWithUnHealthyNode() throws JSONException,
    MockNM nm1 = rm.registerNode("h1:1234", 5120);
    MockNM nm2 = rm.registerNode("h2:1235", 5121);
    rm.sendNodeStarted(nm1);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
    // One unhealthy node which should not appear in the list after
    // MAPREDUCE-3760.
    MockNM nm3 = rm.registerNode("h3:1236", 5122);
-   rm.NMwaitForState(nm3.getNodeId(), RMNodeState.NEW);
+   rm.NMwaitForState(nm3.getNodeId(), NodeState.NEW);
    rm.sendNodeStarted(nm3);
-   rm.NMwaitForState(nm3.getNodeId(), RMNodeState.RUNNING);
+   rm.NMwaitForState(nm3.getNodeId(), NodeState.RUNNING);
    RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes()
        .get(nm3.getNodeId());
    NodeHealthStatus nodeHealth = node.getNodeHealthStatus();
@@ -147,7 +146,7 @@ public void testNodesDefaultWithUnHealthyNode() throws JSONException,
    nodeHealth.setIsNodeHealthy(false);
    node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth,
        new ArrayList<ContainerStatus>(), null, null));
-   rm.NMwaitForState(nm3.getNodeId(), RMNodeState.UNHEALTHY);
+   rm.NMwaitForState(nm3.getNodeId(), NodeState.UNHEALTHY);
    ClientResponse response =
        r.path("ws").path("v1").path("cluster").path("nodes")
@@ -169,11 +168,11 @@ public void testNodesQueryState() throws JSONException, Exception {
    MockNM nm1 = rm.registerNode("h1:1234", 5120);
    MockNM nm2 = rm.registerNode("h2:1235", 5121);
    rm.sendNodeStarted(nm1);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
    ClientResponse response = r.path("ws").path("v1").path("cluster")
-       .path("nodes").queryParam("state", RMNodeState.RUNNING.toString())
+       .path("nodes").queryParam("state", NodeState.RUNNING.toString())
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@@ -196,7 +195,7 @@ public void testNodesQueryStateNone() throws JSONException, Exception {
    ClientResponse response = r.path("ws").path("v1").path("cluster")
        .path("nodes")
-       .queryParam("state", RMNodeState.DECOMMISSIONED.toString())
+       .queryParam("state", NodeState.DECOMMISSIONED.toString())
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
@@ -231,7 +230,7 @@ public void testNodesQueryStateInvalid() throws JSONException, Exception {
      WebServicesTestUtils
          .checkStringMatch(
              "exception message",
-             "No enum const class org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState.BOGUSSTATE",
+             "No enum const class org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE",
              message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "IllegalArgumentException", type);
@@ -250,13 +249,13 @@ public void testNodesQueryStateLost() throws JSONException, Exception {
    MockNM nm2 = rm.registerNode("h2:1234", 5120);
    rm.sendNodeStarted(nm1);
    rm.sendNodeStarted(nm2);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.RUNNING);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
    rm.sendNodeLost(nm1);
    rm.sendNodeLost(nm2);
    ClientResponse response = r.path("ws").path("v1").path("cluster")
-       .path("nodes").queryParam("state", RMNodeState.LOST.toString())
+       .path("nodes").queryParam("state", NodeState.LOST.toString())
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@@ -283,8 +282,8 @@ public void testSingleNodeQueryStateLost() throws JSONException, Exception {
    MockNM nm2 = rm.registerNode("h2:1234", 5120);
    rm.sendNodeStarted(nm1);
    rm.sendNodeStarted(nm2);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.RUNNING);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
    rm.sendNodeLost(nm1);
    rm.sendNodeLost(nm2);
@@ -312,8 +311,8 @@ public void testNodesQueryHealthy() throws JSONException, Exception {
    MockNM nm1 = rm.registerNode("h1:1234", 5120);
    MockNM nm2 = rm.registerNode("h2:1235", 5121);
    rm.sendNodeStarted(nm1);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
    ClientResponse response = r.path("ws").path("v1").path("cluster")
        .path("nodes").queryParam("healthy", "true")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -332,8 +331,8 @@ public void testNodesQueryHealthyCase() throws JSONException, Exception {
    MockNM nm1 = rm.registerNode("h1:1234", 5120);
    MockNM nm2 = rm.registerNode("h2:1235", 5121);
    rm.sendNodeStarted(nm1);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
    ClientResponse response = r.path("ws").path("v1").path("cluster")
        .path("nodes").queryParam("healthy", "TRUe")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -353,8 +352,8 @@ public void testNodesQueryHealthyAndState() throws JSONException, Exception {
    MockNM nm1 = rm.registerNode("h1:1234", 5120);
    MockNM nm2 = rm.registerNode("h2:1235", 5121);
    rm.sendNodeStarted(nm1);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
    RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes()
        .get(nm1.getNodeId());
    NodeHealthStatus nodeHealth = node.getNodeHealthStatus();
@@ -362,11 +361,11 @@ public void testNodesQueryHealthyAndState() throws JSONException, Exception {
    nodeHealth.setIsNodeHealthy(false);
    node.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeHealth,
        new ArrayList<ContainerStatus>(), null, null));
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.UNHEALTHY);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.UNHEALTHY);
    ClientResponse response = r.path("ws").path("v1").path("cluster")
        .path("nodes").queryParam("healthy", "true")
-       .queryParam("state", RMNodeState.RUNNING.toString())
+       .queryParam("state", NodeState.RUNNING.toString())
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
@@ -380,8 +379,8 @@ public void testNodesQueryHealthyFalse() throws JSONException, Exception {
    MockNM nm1 = rm.registerNode("h1:1234", 5120);
    MockNM nm2 = rm.registerNode("h2:1235", 5121);
    rm.sendNodeStarted(nm1);
-   rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
-   rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+   rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+   rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
    ClientResponse response = r.path("ws").path("v1").path("cluster")
        .path("nodes").queryParam("healthy", "false")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);