YARN-4293. ResourceUtilization should be a part of yarn node CLI. (Sunil G via wangda)

(cherry picked from commit 79c41b1d83)
(cherry picked from commit 9f4a6ec663)
Wangda Tan 2015-12-16 13:18:19 -08:00
parent e4f638d815
commit 9ed9eb265f
29 changed files with 248 additions and 35 deletions

View File

@@ -34,8 +34,8 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode

View File

@@ -27,8 +27,8 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode

View File

@@ -551,6 +551,8 @@ Release 2.8.0 - UNRELEASED
YARN-4418. AM Resource Limit per partition can be updated to ResourceUsage as well.
(Sunil G via wangda)
YARN-4293. ResourceUtilization should be a part of yarn node CLI. (Sunil G via wangda)
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not

View File

@@ -196,4 +196,29 @@ public abstract class NodeReport {
@Private
@Unstable
public abstract void setNodeLabels(Set<String> nodeLabels);
/**
* Get the aggregated resource utilization of the containers running on the node.
* @return aggregated containers resource utilization
*/
@Public
@Stable
public abstract ResourceUtilization getAggregatedContainersUtilization();
@Private
@Unstable
public abstract void setAggregatedContainersUtilization(ResourceUtilization
containersUtilization);
/**
* Get the resource utilization of the node.
* @return node resource utilization
*/
@Public
@Stable
public abstract ResourceUtilization getNodeUtilization();
@Private
@Unstable
public abstract void setNodeUtilization(ResourceUtilization nodeUtilization);
}
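
For context (not part of this commit): a minimal client-side sketch, under the usual YarnClient lifecycle, of how the two new NodeReport getters added above could be consumed. Class and variable names are illustrative, and both getters may return null when a node has not reported utilization.

    // Sketch only: read the new utilization fields from NodeReport via YarnClient.
    import java.util.List;
    import org.apache.hadoop.yarn.api.records.NodeReport;
    import org.apache.hadoop.yarn.api.records.NodeState;
    import org.apache.hadoop.yarn.api.records.ResourceUtilization;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class NodeUtilizationExample {
      public static void main(String[] args) throws Exception {
        YarnClient client = YarnClient.createYarnClient();
        client.init(new YarnConfiguration());
        client.start();
        try {
          List<NodeReport> reports = client.getNodeReports(NodeState.RUNNING);
          for (NodeReport report : reports) {
            ResourceUtilization node = report.getNodeUtilization();
            ResourceUtilization containers = report.getAggregatedContainersUtilization();
            if (node != null) {
              System.out.println(report.getNodeId() + " node: PMem " + node.getPhysicalMemory()
                  + " MB, VMem " + node.getVirtualMemory() + " MB, CPU " + node.getCPU());
            }
            if (containers != null) {
              System.out.println(report.getNodeId() + " containers: PMem "
                  + containers.getPhysicalMemory() + " MB");
            }
          }
        } finally {
          client.stop();
        }
      }
    }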

View File

@@ -16,10 +16,10 @@
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records;
package org.apache.hadoop.yarn.api.records;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;
/**
@@ -28,10 +28,13 @@ import org.apache.hadoop.yarn.util.Records;
* resources in the cluster.
* </p>
*/
@Private
@Evolving
@Public
@Unstable
public abstract class ResourceUtilization implements
Comparable<ResourceUtilization> {
@Public
@Unstable
public static ResourceUtilization newInstance(int pmem, int vmem, float cpu) {
ResourceUtilization utilization =
Records.newRecord(ResourceUtilization.class);
@@ -46,6 +49,8 @@ public abstract class ResourceUtilization implements
*
* @return <em>virtual memory</em> in MB
*/
@Public
@Unstable
public abstract int getVirtualMemory();
/**
@@ -53,6 +58,8 @@ public abstract class ResourceUtilization implements
*
* @param vmem <em>virtual memory</em> in MB
*/
@Public
@Unstable
public abstract void setVirtualMemory(int vmem);
/**
@@ -60,6 +67,8 @@ public abstract class ResourceUtilization implements
*
* @return <em>physical memory</em> in MB
*/
@Public
@Unstable
public abstract int getPhysicalMemory();
/**
@@ -67,6 +76,8 @@ public abstract class ResourceUtilization implements
*
* @param pmem <em>physical memory</em> in MB
*/
@Public
@Unstable
public abstract void setPhysicalMemory(int pmem);
/**
@@ -74,6 +85,8 @@ public abstract class ResourceUtilization implements
*
* @return <em>CPU utilization</em> normalized to 1 CPU
*/
@Public
@Unstable
public abstract float getCPU();
/**
@@ -81,6 +94,8 @@ public abstract class ResourceUtilization implements
*
* @param cpu <em>CPU utilization</em> normalized to 1 CPU
*/
@Public
@Unstable
public abstract void setCPU(float cpu);
@Override
@@ -125,6 +140,8 @@ public abstract class ResourceUtilization implements
* @param vmem Virtual memory used to add.
* @param cpu CPU utilization to add.
*/
@Public
@Unstable
public void addTo(int pmem, int vmem, float cpu) {
this.setPhysicalMemory(this.getPhysicalMemory() + pmem);
this.setVirtualMemory(this.getVirtualMemory() + vmem);
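
A small usage sketch of the record API shown above (not part of the diff): newInstance, the getters, and addTo. All numbers are arbitrary.

    // Sketch only: exercising ResourceUtilization, now under
    // org.apache.hadoop.yarn.api.records. Values are arbitrary.
    import org.apache.hadoop.yarn.api.records.ResourceUtilization;

    public class ResourceUtilizationExample {
      public static void main(String[] args) {
        // pmem (MB), vmem (MB), CPU normalized to 1 CPU
        ResourceUtilization util = ResourceUtilization.newInstance(1024, 2048, 0.5f);
        // Accumulate a second sample on top of the first.
        util.addTo(512, 512, 0.25f);
        System.out.println("PMem MB: " + util.getPhysicalMemory()); // 1536
        System.out.println("VMem MB: " + util.getVirtualMemory());  // 2560
        System.out.println("CPU: " + util.getCPU());                // 0.75
      }
    }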

View File

@@ -58,6 +58,12 @@ message ResourceProto {
optional int32 virtual_cores = 2;
}
message ResourceUtilizationProto {
optional int32 pmem = 1;
optional int32 vmem = 2;
optional float cpu = 3;
}
message ResourceOptionProto {
optional ResourceProto resource = 1;
optional int32 over_commit_timeout = 2;
@@ -254,6 +260,8 @@ message NodeReportProto {
optional string health_report = 8;
optional int64 last_health_report_time = 9;
repeated string node_labels = 10;
optional ResourceUtilizationProto containers_utilization = 11;
optional ResourceUtilizationProto node_utilization = 12;
}
message NodeIdToLabelsInfoProto {
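
Illustration only, not part of the patch: with ResourceUtilizationProto now defined in yarn_protos.proto, the generated Java builder follows the usual protobuf pattern; the field names below are the ones declared above.

    // Sketch only: building the new proto message through its generated builder.
    import org.apache.hadoop.yarn.proto.YarnProtos.ResourceUtilizationProto;

    public class ResourceUtilizationProtoExample {
      public static void main(String[] args) {
        ResourceUtilizationProto proto = ResourceUtilizationProto.newBuilder()
            .setPmem(1024)  // MB
            .setVmem(2048)  // MB
            .setCpu(0.5f)   // normalized to 1 CPU
            .build();
        System.out.println(proto.getPmem() + " " + proto.getVmem() + " " + proto.getCpu());
      }
    }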

View File

@@ -218,6 +218,27 @@ public class NodeCLI extends YarnCLI {
new ArrayList<String>(report.getNodeLabels());
Collections.sort(nodeLabelsList);
nodeReportStr.println(StringUtils.join(nodeLabelsList.iterator(), ','));
nodeReportStr.print("\tResource Utilization by Node : ");
if (nodeReport.getNodeUtilization() != null) {
nodeReportStr.print("PMem:"
+ nodeReport.getNodeUtilization().getPhysicalMemory()
+ " MB, VMem:" + nodeReport.getNodeUtilization().getVirtualMemory()
+ " MB, VCores:" + nodeReport.getNodeUtilization().getCPU());
}
nodeReportStr.println();
nodeReportStr.print("\tResource Utilization by Containers : ");
if (nodeReport.getAggregatedContainersUtilization() != null) {
nodeReportStr.print("PMem:"
+ nodeReport.getAggregatedContainersUtilization()
.getPhysicalMemory()
+ " MB, VMem:"
+ nodeReport.getAggregatedContainersUtilization()
.getVirtualMemory() + " MB, VCores:"
+ nodeReport.getAggregatedContainersUtilization().getCPU());
}
nodeReportStr.println();
}
if (nodeReport == null) {
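
With the block above in place, `yarn node -status <node-id>` prints two additional lines. The excerpt below mirrors the output asserted in TestYarnCLI further down in this diff; the numbers come from that test's fixtures, and both lines are left empty when a node has not reported utilization.

    Resource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0
    Resource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0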

View File

@@ -64,6 +64,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@@ -1178,6 +1179,8 @@ public class TestYarnCLI {
pw.println("\tCPU-Used : 0 vcores");
pw.println("\tCPU-Capacity : 0 vcores");
pw.println("\tNode-Labels : a,b,c,x,y,z");
pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
pw.close();
String nodeStatusStr = baos.toString("UTF-8");
verify(sysOut, times(1)).println(isA(String.class));
@@ -1212,6 +1215,44 @@ public class TestYarnCLI {
pw.println("\tCPU-Used : 0 vcores");
pw.println("\tCPU-Capacity : 0 vcores");
pw.println("\tNode-Labels : ");
pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
pw.close();
String nodeStatusStr = baos.toString("UTF-8");
verify(sysOut, times(1)).println(isA(String.class));
verify(sysOut).println(nodeStatusStr);
}
@Test
public void testNodeStatusWithEmptyResourceUtilization() throws Exception {
NodeId nodeId = NodeId.newInstance("host0", 0);
NodeCLI cli = new NodeCLI();
when(client.getNodeReports()).thenReturn(
getNodeReports(3, NodeState.RUNNING, false, true));
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
cli.setSysErrPrintStream(sysErr);
int result = cli.run(new String[] { "-status", nodeId.toString() });
assertEquals(0, result);
verify(client).getNodeReports();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintWriter pw = new PrintWriter(baos);
pw.println("Node Report : ");
pw.println("\tNode-Id : host0:0");
pw.println("\tRack : rack1");
pw.println("\tNode-State : RUNNING");
pw.println("\tNode-Http-Address : host1:8888");
pw.println("\tLast-Health-Update : "
+ DateFormatUtils.format(new Date(0), "E dd/MMM/yy hh:mm:ss:SSzz"));
pw.println("\tHealth-Report : ");
pw.println("\tContainers : 0");
pw.println("\tMemory-Used : 0MB");
pw.println("\tMemory-Capacity : 0MB");
pw.println("\tCPU-Used : 0 vcores");
pw.println("\tCPU-Capacity : 0 vcores");
pw.println("\tNode-Labels : a,b,c,x,y,z");
pw.println("\tResource Utilization by Node : ");
pw.println("\tResource Utilization by Containers : ");
pw.close();
String nodeStatusStr = baos.toString("UTF-8");
verify(sysOut, times(1)).println(isA(String.class));
@@ -1463,11 +1504,16 @@ public class TestYarnCLI {
}
private List<NodeReport> getNodeReports(int noOfNodes, NodeState state) {
return getNodeReports(noOfNodes, state, true);
return getNodeReports(noOfNodes, state, true, false);
}
private List<NodeReport> getNodeReports(int noOfNodes, NodeState state,
boolean emptyNodeLabel) {
return getNodeReports(noOfNodes, state, emptyNodeLabel, false);
}
private List<NodeReport> getNodeReports(int noOfNodes, NodeState state,
boolean emptyNodeLabel, boolean emptyResourceUtilization) {
List<NodeReport> nodeReports = new ArrayList<NodeReport>();
for (int i = 0; i < noOfNodes; i++) {
@@ -1481,6 +1527,14 @@ public class TestYarnCLI {
.newInstance("host" + i, 0), state, "host" + 1 + ":8888",
"rack1", Records.newRecord(Resource.class), Records
.newRecord(Resource.class), 0, "", 0, nodeLabels);
if (!emptyResourceUtilization) {
ResourceUtilization containersUtilization = ResourceUtilization
.newInstance(1024, 2048, 4);
ResourceUtilization nodeUtilization = ResourceUtilization.newInstance(
2048, 4096, 8);
nodeReport.setAggregatedContainersUtilization(containersUtilization);
nodeReport.setNodeUtilization(nodeUtilization);
}
nodeReports.add(nodeReport);
}
return nodeReports;

View File

@@ -27,10 +27,12 @@ import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceUtilizationProto;
import com.google.protobuf.TextFormat;
@@ -44,6 +46,8 @@ public class NodeReportPBImpl extends NodeReport {
private NodeId nodeId;
private Resource used;
private Resource capability;
private ResourceUtilization containersUtilization = null;
private ResourceUtilization nodeUtilization = null;
Set<String> labels;
public NodeReportPBImpl() {
@@ -263,6 +267,18 @@ public class NodeReportPBImpl extends NodeReport {
builder.clearNodeLabels();
builder.addAllNodeLabels(this.labels);
}
if (this.nodeUtilization != null
&& !((ResourceUtilizationPBImpl) this.nodeUtilization).getProto()
.equals(builder.getNodeUtilization())) {
builder.setNodeUtilization(convertToProtoFormat(this.nodeUtilization));
}
if (this.containersUtilization != null
&& !((ResourceUtilizationPBImpl) this.containersUtilization).getProto()
.equals(builder.getContainersUtilization())) {
builder
.setContainersUtilization(convertToProtoFormat(
this.containersUtilization));
}
}
private void mergeLocalToProto() {
@@ -297,6 +313,15 @@ public class NodeReportPBImpl extends NodeReport {
return ((ResourcePBImpl) r).getProto();
}
private ResourceUtilizationPBImpl convertFromProtoFormat(
ResourceUtilizationProto p) {
return new ResourceUtilizationPBImpl(p);
}
private ResourceUtilizationProto convertToProtoFormat(ResourceUtilization r) {
return ((ResourceUtilizationPBImpl) r).getProto();
}
@Override
public Set<String> getNodeLabels() {
initNodeLabels();
@@ -318,4 +343,52 @@ public class NodeReportPBImpl extends NodeReport {
this.labels = new HashSet<String>();
this.labels.addAll(p.getNodeLabelsList());
}
@Override
public ResourceUtilization getAggregatedContainersUtilization() {
if (this.containersUtilization != null) {
return this.containersUtilization;
}
NodeReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasContainersUtilization()) {
return null;
}
this.containersUtilization = convertFromProtoFormat(p
.getContainersUtilization());
return this.containersUtilization;
}
@Override
public void setAggregatedContainersUtilization(
ResourceUtilization containersResourceUtilization) {
maybeInitBuilder();
if (containersResourceUtilization == null) {
builder.clearContainersUtilization();
}
this.containersUtilization = containersResourceUtilization;
}
@Override
public ResourceUtilization getNodeUtilization() {
if (this.nodeUtilization != null) {
return this.nodeUtilization;
}
NodeReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasNodeUtilization()) {
return null;
}
this.nodeUtilization = convertFromProtoFormat(p.getNodeUtilization());
return this.nodeUtilization;
}
@Override
public void setNodeUtilization(ResourceUtilization nodeResourceUtilization) {
maybeInitBuilder();
if (nodeResourceUtilization == null) {
builder.clearNodeUtilization();
}
this.nodeUtilization = nodeResourceUtilization;
}
}
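
A round-trip sketch (not from the patch) showing that the two new fields survive proto serialization; it assumes the standard PBImpl pattern used elsewhere in YARN records, i.e. a getProto() method and a constructor taking the proto.

    // Sketch only: set the new fields, serialize to proto, read them back.
    import org.apache.hadoop.yarn.api.records.NodeReport;
    import org.apache.hadoop.yarn.api.records.ResourceUtilization;
    import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
    import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;

    public class NodeReportRoundTrip {
      public static void main(String[] args) {
        NodeReportPBImpl report = new NodeReportPBImpl();
        report.setNodeUtilization(ResourceUtilization.newInstance(2048, 4096, 8));
        report.setAggregatedContainersUtilization(
            ResourceUtilization.newInstance(1024, 2048, 4));

        // mergeLocalToBuilder() above copies both local records into the proto.
        NodeReportProto proto = report.getProto();
        NodeReport rebuilt = new NodeReportPBImpl(proto);

        System.out.println(rebuilt.getNodeUtilization().getPhysicalMemory());      // 2048
        System.out.println(rebuilt.getAggregatedContainersUtilization().getCPU()); // 4.0
      }
    }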

View File

@@ -16,13 +16,13 @@
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records.impl.pb;
package org.apache.hadoop.yarn.api.records.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceUtilizationProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceUtilizationProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
@Private
@Unstable

View File

@@ -142,6 +142,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
import org.apache.hadoop.yarn.api.records.Token;
@@ -496,6 +497,7 @@ public class TestPBImplRecords {
generateByNewInstance(ReservationRequest.class);
generateByNewInstance(ReservationRequests.class);
generateByNewInstance(ReservationDefinition.class);
generateByNewInstance(ResourceUtilization.class);
}
private class GetSetPair {

View File

@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.util.Records;
/**

View File

@@ -27,10 +27,12 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceUtilizationPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
@@ -38,10 +40,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceUtilizationProto;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
public class NodeStatusPBImpl extends NodeStatus {
NodeStatusProto proto = NodeStatusProto.getDefaultInstance();

View File

@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.api.records.PreemptionMessage;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@@ -180,11 +181,21 @@ public class BuilderUtils {
return newNodeReport(nodeId, nodeState, httpAddress, rackName, used,
capability, numContainers, healthReport, lastHealthReportTime, null);
}
public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime,
Set<String> nodeLabels) {
return newNodeReport(nodeId, nodeState, httpAddress, rackName, used,
capability, numContainers, healthReport, lastHealthReportTime,
nodeLabels, null, null);
}
public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime,
Set<String> nodeLabels, ResourceUtilization containersUtilization,
ResourceUtilization nodeUtilization) {
NodeReport nodeReport = recordFactory.newRecordInstance(NodeReport.class);
nodeReport.setNodeId(nodeId);
nodeReport.setNodeState(nodeState);
@@ -196,6 +207,8 @@ public class BuilderUtils {
nodeReport.setHealthReport(healthReport);
nodeReport.setLastHealthReportTime(lastHealthReportTime);
nodeReport.setNodeLabels(nodeLabels);
nodeReport.setAggregatedContainersUtilization(containersUtilization);
nodeReport.setNodeUtilization(nodeUtilization);
return nodeReport;
}
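
For illustration only: a call to the new 12-argument overload added above (the shorter overloads now delegate to it with null utilization). All values below are placeholders.

    // Sketch only: building a NodeReport with utilization through BuilderUtils.
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.api.records.NodeReport;
    import org.apache.hadoop.yarn.api.records.NodeState;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceUtilization;
    import org.apache.hadoop.yarn.server.utils.BuilderUtils;

    public class NewNodeReportExample {
      public static void main(String[] args) {
        NodeReport report = BuilderUtils.newNodeReport(
            NodeId.newInstance("host0", 0), NodeState.RUNNING, "host0:8042",
            "/default-rack", Resource.newInstance(0, 0),
            Resource.newInstance(8192, 8), 0, "", System.currentTimeMillis(),
            null,                                            // nodeLabels
            ResourceUtilization.newInstance(1024, 2048, 4),  // containers
            ResourceUtilization.newInstance(2048, 4096, 8)); // node
        System.out.println(report.getNodeUtilization().getPhysicalMemory()); // 2048
      }
    }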

View File

@@ -57,8 +57,3 @@ message VersionProto {
optional int32 minor_version = 2;
}
message ResourceUtilizationProto {
optional int32 pmem = 1;
optional int32 vmem = 2;
optional float cpu = 3;
}

View File

@@ -19,7 +19,7 @@
package org.apache.hadoop.yarn.server.nodemanager;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
/**
* Interface for monitoring the resources of a node.

View File

@@ -23,7 +23,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**

View File

@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -74,7 +75,6 @@ import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;

View File

@@ -19,7 +19,7 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;

View File

@@ -34,7 +34,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

View File

@@ -988,13 +988,14 @@ public class ClientRMService extends AbstractService implements
used = schedulerNodeReport.getUsedResource();
numContainers = schedulerNodeReport.getNumContainers();
}
NodeReport report =
BuilderUtils.newNodeReport(rmNode.getNodeID(), rmNode.getState(),
rmNode.getHttpAddress(), rmNode.getRackName(), used,
rmNode.getTotalCapability(), numContainers,
rmNode.getHealthReport(), rmNode.getLastHealthReportTime(),
rmNode.getNodeLabels());
rmNode.getNodeLabels(), rmNode.getAggregatedContainersUtilization(),
rmNode.getNodeUtilization());
return report;
}

View File

@@ -29,8 +29,8 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
/**
* Node managers information on available resources

View File

@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -57,7 +58,6 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType;

View File

@@ -25,11 +25,11 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
public class RMNodeStatusEvent extends RMNodeEvent {

View File

@@ -33,9 +33,9 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;

View File

@@ -22,7 +22,7 @@ import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
/**

View File

@@ -30,11 +30,11 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;

View File

@@ -34,9 +34,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;

View File

@@ -28,6 +28,7 @@ import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster.CustomNodeManager;
@@ -36,7 +37,6 @@ import org.apache.hadoop.yarn.server.api.ServerRMProxy;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;