MAPREDUCE-2738. Added the missing cluster level statistics on the RM web UI. Contributed by Robert Joseph Evans.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1179229 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Vinod Kumar Vavilapalli 2011-10-05 14:01:32 +00:00
parent f29bfa0e0e
commit 9a4e890f4a
16 changed files with 394 additions and 73 deletions

View File

@ -351,6 +351,9 @@ Release 0.23.0 - Unreleased
the outputs of tasks from a crashed job so as to support MR Application
Master recovery. (Sharad Agarwal and Arun C Murthy via vinodkv)
MAPREDUCE-2738. Added the missing cluster level statistics on the RM web
UI. (Robert Joseph Evans via vinodkv)
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and

View File

@ -110,7 +110,7 @@ public class QueueMetrics {
"Metrics for queue: " + queueName, metrics);
}
synchronized QueueMetrics getUserMetrics(String userName) {
public synchronized QueueMetrics getUserMetrics(String userName) {
if (users == null) {
return null;
}

View File

@ -99,7 +99,8 @@ public interface YarnScheduler extends EventHandler<SchedulerEvent> {
/**
* Get node resource usage report.
* @param nodeId
* @return the {@link SchedulerNodeReport} for the node
* @return the {@link SchedulerNodeReport} for the node or null
* if nodeId does not point to a defined node.
*/
@LimitedPrivate("yarn")
@Stable

View File

@ -173,14 +173,6 @@ public class FifoScheduler implements ResourceScheduler {
}
};
public synchronized Resource getUsedResource(NodeId nodeId) {
return getNode(nodeId).getUsedResource();
}
public synchronized Resource getAvailableResource(NodeId nodeId) {
return getNode(nodeId).getAvailableResource();
}
@Override
public Resource getMinimumResourceCapability() {
return minimumAllocation;
@ -718,6 +710,9 @@ public class FifoScheduler implements ResourceScheduler {
// Inform the node
node.releaseContainer(container);
// Update total usage
Resources.subtractFrom(usedResource, container.getResource());
LOG.info("Application " + applicationAttemptId +
" released container " + container.getId() +

View File

@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
import com.google.inject.Inject;
/**
 * Renders the "About the Cluster" page body: the cluster-wide metrics
 * overview table followed by cluster identity and version information.
 */
public class AboutBlock extends HtmlBlock {
  // Injected by Guice; used for the service-state line below.
  final ResourceManager rm;

  @Inject
  AboutBlock(ResourceManager rm, ViewContext ctx) {
    super(ctx);
    this.rm = rm;
  }

  @Override
  protected void render(Block html) {
    html._(MetricsOverviewTable.class);
    // clusterTimeStamp doubles as the cluster ID and the RM start time.
    long ts = ResourceManager.clusterTimeStamp;
    // Use the injected field directly; the previous local
    // "ResourceManager rm = getInstance(ResourceManager.class);" shadowed
    // this.rm and made the constructor injection dead code.
    info("Cluster overview").
      _("Cluster ID:", ts).
      _("ResourceManager state:", rm.getServiceState()).
      _("ResourceManager started on:", Times.format(ts)).
      _("ResourceManager version:", YarnVersionInfo.getBuildVersion() +
          " on " + YarnVersionInfo.getDate()).
      _("Hadoop version:", VersionInfo.getBuildVersion() +
          " on " + VersionInfo.getDate());
    html._(InfoBlock.class);
  }
}

View File

@ -19,15 +19,14 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
public class InfoPage extends RmView {
public class AboutPage extends RmView {
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
}
@Override protected Class<? extends SubView> content() {
return InfoBlock.class;
return AboutBlock.class;
}
}

View File

@ -0,0 +1,31 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
/**
* Renders a block for the applications with metrics information.
*/
class AppsBlockWithMetrics extends HtmlBlock {
  // Order matters: the cluster-wide metrics overview table is rendered
  // above the applications table on the page.
  @Override public void render(Block html) {
    html._(MetricsOverviewTable.class);
    html._(AppsBlock.class);
  }
}

View File

@ -31,7 +31,6 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import static org.apache.hadoop.yarn.util.StringHelper.*;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
class CapacitySchedulerPage extends RmView {
static final String _Q = ".ui-state-default.ui-corner-all";
@ -96,6 +95,7 @@ class CapacitySchedulerPage extends RmView {
@Override
public void render(Block html) {
html._(MetricsOverviewTable.class);
UL<DIV<DIV<Hamlet>>> ul = html.
div("#cs-wrapper.ui-widget").
div(".ui-widget-header.ui-corner-top").

View File

@ -19,7 +19,6 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import com.google.inject.Inject;
import com.google.inject.servlet.RequestScoped;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
@ -35,7 +34,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
import static org.apache.hadoop.yarn.util.StringHelper.*;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
class DefaultSchedulerPage extends RmView {
static final String _Q = ".ui-state-default.ui-corner-all";
@ -76,8 +74,9 @@ class DefaultSchedulerPage extends RmView {
int nodeContainers = 0;
for (RMNode ni : this.rmContext.getRMNodes().values()) {
usedNodeMem += fs.getUsedResource(ni.getNodeID()).getMemory();
availNodeMem += fs.getAvailableResource(ni.getNodeID()).getMemory();
SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID());
usedNodeMem += report.getUsedResource().getMemory();
availNodeMem += report.getAvailableResource().getMemory();
totNodeMem += ni.getTotalCapability().getMemory();
nodeContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers();
}
@ -109,6 +108,7 @@ class DefaultSchedulerPage extends RmView {
@Override
public void render(Block html) {
html._(MetricsOverviewTable.class);
UL<DIV<DIV<Hamlet>>> ul = html.
div("#cs-wrapper.ui-widget").
div(".ui-widget-header.ui-corner-top").

View File

@ -0,0 +1,164 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import com.google.inject.Inject;
/**
* Provides a table with an overview of many cluster wide metrics and if
* per user metrics are enabled it will show an overview of what the
* current user is using on the cluster.
*/
public class MetricsOverviewTable extends HtmlBlock {
  // 2^30 bytes: the scheduler metrics are denominated in GB, and
  // StringUtils.byteDesc wants a byte count.
  private static final long BYTES_IN_GB = 1024 * 1024 * 1024;

  private final RMContext rmContext;
  private final ResourceManager rm;

  @Inject
  MetricsOverviewTable(RMContext context, ResourceManager rm, ViewContext ctx) {
    super(ctx);
    this.rmContext = context;
    this.rm = rm;
  }

  /**
   * Renders the cluster-wide metrics table and, when the request carries a
   * remote user with per-user metrics enabled, a second table with that
   * user's usage.
   */
  @Override
  protected void render(Block html) {
    //Yes this is a hack, but there is no other way to insert
    //CSS in the correct spot
    html.style(".metrics {margin-bottom:5px}");

    ResourceScheduler rs = rm.getResourceScheduler();
    QueueMetrics metrics = rs.getRootQueueMetrics();

    int appsSubmitted = metrics.getAppsSubmitted();
    int reservedGB = metrics.getReservedGB();
    int availableGB = metrics.getAvailableGB();
    int allocatedGB = metrics.getAllocatedGB();
    int containersAllocated = metrics.getAllocatedContainers();
    // Total capacity is reconstructed from the three disjoint buckets.
    int totalGB = availableGB + reservedGB + allocatedGB;

    ConcurrentMap<NodeId,RMNode> nodes = rmContext.getRMNodes();
    int totalNodes = nodes.size();
    int lostNodes = 0;
    int unhealthyNodes = 0;
    int decommissionedNodes = 0;
    for(RMNode node: nodes.values()) {
      // A node with no state information is counted as lost rather than
      // silently dropped from the totals.
      if(node == null || node.getState() == null) {
        lostNodes++;
        continue;
      }
      switch(node.getState()) {
      case DECOMMISSIONED:
        decommissionedNodes++;
        break;
      case LOST:
        lostNodes++;
        break;
      case UNHEALTHY:
        unhealthyNodes++;
        break;
      //RUNNING noop
      }
    }

    DIV<Hamlet> div = html.div().$class("metrics");

    // Fixed header typo: "Memopry Total" -> "Memory Total".
    div.table("#metricsoverview").
    thead().$class("ui-widget-header").
      tr().
        th().$class("ui-state-default")._("Apps Submitted")._().
        th().$class("ui-state-default")._("Containers Running")._().
        th().$class("ui-state-default")._("Memory Used")._().
        th().$class("ui-state-default")._("Memory Total")._().
        th().$class("ui-state-default")._("Memory Reserved")._().
        th().$class("ui-state-default")._("Total Nodes")._().
        th().$class("ui-state-default")._("Decommissioned Nodes")._().
        th().$class("ui-state-default")._("Lost Nodes")._().
        th().$class("ui-state-default")._("Unhealthy Nodes")._().
      _().
    _().
    tbody().$class("ui-widget-content").
      tr().
        td(String.valueOf(appsSubmitted)).
        td(String.valueOf(containersAllocated)).
        td(StringUtils.byteDesc(allocatedGB * BYTES_IN_GB)).
        td(StringUtils.byteDesc(totalGB * BYTES_IN_GB)).
        td(StringUtils.byteDesc(reservedGB * BYTES_IN_GB)).
        // Node counts link to the nodes page filtered by state.
        td().a(url("nodes"),String.valueOf(totalNodes))._().
        td().a(url("nodes/DECOMMISSIONED"),String.valueOf(decommissionedNodes))._().
        td().a(url("nodes/LOST"),String.valueOf(lostNodes))._().
        td().a(url("nodes/UNHEALTHY"),String.valueOf(unhealthyNodes))._().
      _().
    _()._();

    String user = request().getRemoteUser();
    if (user != null) {
      // getUserMetrics returns null when per-user metrics are disabled.
      QueueMetrics userMetrics = metrics.getUserMetrics(user);
      if(userMetrics != null) {
        int myAppsSubmitted = userMetrics.getAppsSubmitted();
        int myRunningContainers = userMetrics.getAllocatedContainers();
        int myPendingContainers = userMetrics.getPendingContainers();
        int myReservedContainers = userMetrics.getReservedContainers();
        int myReservedGB = userMetrics.getReservedGB();
        int myPendingGB = userMetrics.getPendingGB();
        int myAllocatedGB = userMetrics.getAllocatedGB();
        div.table("#usermetricsoverview").
        thead().$class("ui-widget-header").
          tr().
            th().$class("ui-state-default")._("Apps Submitted ("+user+")")._().
            th().$class("ui-state-default")._("Containers Running ("+user+")")._().
            th().$class("ui-state-default")._("Containers Pending ("+user+")")._().
            th().$class("ui-state-default")._("Containers Reserved ("+user+")")._().
            th().$class("ui-state-default")._("Memory Used ("+user+")")._().
            th().$class("ui-state-default")._("Memory Pending ("+user+")")._().
            th().$class("ui-state-default")._("Memory Reserved ("+user+")")._().
          _().
        _().
        tbody().$class("ui-widget-content").
          tr().
            td(String.valueOf(myAppsSubmitted)).
            td(String.valueOf(myRunningContainers)).
            td(String.valueOf(myPendingContainers)).
            td(String.valueOf(myReservedContainers)).
            td(StringUtils.byteDesc(myAllocatedGB * BYTES_IN_GB)).
            td(StringUtils.byteDesc(myPendingGB * BYTES_IN_GB)).
            td(StringUtils.byteDesc(myReservedGB * BYTES_IN_GB)).
          _().
        _()._();
      }
    }

    div._();
  }
}

View File

@ -18,14 +18,21 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.NODE_STATE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@ -38,45 +45,75 @@ import com.google.inject.Inject;
class NodesPage extends RmView {
static class NodesBlock extends HtmlBlock {
private static final long BYTES_IN_MB = 1024 * 1024;
final RMContext rmContext;
final ResourceManager rm;
@Inject
NodesBlock(RMContext context, ViewContext ctx) {
NodesBlock(RMContext context, ResourceManager rm, ViewContext ctx) {
super(ctx);
this.rmContext = context;
this.rm = rm;
}
@Override
protected void render(Block html) {
html._(MetricsOverviewTable.class);
ResourceScheduler sched = rm.getResourceScheduler();
String type = $(NODE_STATE);
TBODY<TABLE<Hamlet>> tbody = html.table("#nodes").
thead().
tr().
th(".rack", "Rack").
th(".state", "Node State").
th(".nodeaddress", "Node Address").
th(".nodehttpaddress", "Node HTTP Address").
th(".healthStatus", "Health-status").
th(".lastHealthUpdate", "Last health-update").
th(".healthReport", "Health-report").
th(".containers", "Containers").
// th(".mem", "Mem Used (MB)").
// th(".mem", "Mem Avail (MB)").
th(".mem", "Mem Used").
th(".mem", "Mem Avail").
_()._().
tbody();
RMNodeState stateFilter = null;
if(type != null && !type.isEmpty()) {
stateFilter = RMNodeState.valueOf(type.toUpperCase());
}
for (RMNode ni : this.rmContext.getRMNodes().values()) {
if(stateFilter != null) {
RMNodeState state = ni.getState();
if(!stateFilter.equals(state)) {
continue;
}
}
NodeId id = ni.getNodeID();
SchedulerNodeReport report = sched.getNodeReport(id);
int numContainers = 0;
int usedMemory = 0;
int availableMemory = 0;
if(report != null) {
numContainers = report.getNumContainers();
usedMemory = report.getUsedResource().getMemory();
availableMemory = report.getAvailableResource().getMemory();
}
NodeHealthStatus health = ni.getNodeHealthStatus();
tbody.tr().
td(ni.getRackName()).
td(String.valueOf(ni.getState())).
td(String.valueOf(ni.getNodeID().toString())).
td().a("http://" + ni.getHttpAddress(), ni.getHttpAddress())._().
td(health.getIsNodeHealthy() ? "Healthy" : "Unhealthy").
td(Times.format(health.getLastHealthReportTime())).
td(String.valueOf(health.getHealthReport())).
// TODO: acm: refactor2 FIXME
//td(String.valueOf(ni.getNumContainers())).
// TODO: FIXME Vinodkv
// td(String.valueOf(ni.getUsedResource().getMemory())).
// td(String.valueOf(ni.getAvailableResource().getMemory())).
td("n/a")._();
td(String.valueOf(numContainers)).
td().br().$title(String.valueOf(usedMemory))._().
_(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().
td().br().$title(String.valueOf(usedMemory))._().
_(StringUtils.byteDesc(availableMemory * BYTES_IN_MB))._().
_();
}
tbody._()._();
}
@ -84,7 +121,12 @@ class NodesPage extends RmView {
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
setTitle("Nodes of the cluster");
String type = $(NODE_STATE);
String title = "Nodes of the cluster";
if(type != null && !type.isEmpty()) {
title = title+" ("+type+")";
}
setTitle(title);
set(DATATABLES_ID, "nodes");
set(initID(DATATABLES, "nodes"), nodesTableInit());
setTableStyles(html, "nodes", ".healthStatus {width:10em}",
@ -96,11 +138,10 @@ class NodesPage extends RmView {
}
private String nodesTableInit() {
return tableInit().
// rack, nodeid, host, healthStatus, health update ts, health report,
// containers, memused, memavail
append(", aoColumns:[null, null, null, null, null, null, ").
append("{sType:'title-numeric', bSearchable:false}]}").
toString();
StringBuilder b = tableInit().append(",aoColumnDefs:[");
b.append("{'bSearchable':false, 'aTargets': [7]} ,");
b.append("{'sType':'title-numeric', 'bSearchable':false, " +
"'aTargets': [ 8, 9] }]}");
return b.toString();
}
}

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.webapp.WebApp;
public class RMWebApp extends WebApp {
static final String APP_ID = "app.id";
static final String QUEUE_NAME = "queue.name";
static final String NODE_STATE = "node.state";
private final ResourceManager rm;
@ -44,9 +45,9 @@ public class RMWebApp extends WebApp {
bind(RMContext.class).toInstance(rm.getRMContext());
}
route("/", RmController.class);
route("/nodes", RmController.class, "nodes");
route(pajoin("/nodes", NODE_STATE), RmController.class, "nodes");
route("/apps", RmController.class);
route("/cluster", RmController.class, "info");
route("/cluster", RmController.class, "about");
route(pajoin("/app", APP_ID), RmController.class, "app");
route("/scheduler", RmController.class, "scheduler");
route(pajoin("/queue", QUEUE_NAME), RmController.class, "queue");

View File

@ -22,8 +22,9 @@ import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.APP_
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.QUEUE_NAME;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@ -35,7 +36,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.webapp.Controller;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
@ -50,25 +50,15 @@ public class RmController extends Controller {
setTitle("Applications");
}
public void info() {
public void about() {
setTitle("About the Cluster");
long ts = ResourceManager.clusterTimeStamp;
ResourceManager rm = getInstance(ResourceManager.class);
info("Cluster overview").
_("Cluster ID:", ts).
_("ResourceManager state:", rm.getServiceState()).
_("ResourceManager started on:", Times.format(ts)).
_("ResourceManager version:", YarnVersionInfo.getBuildVersion() +
" on " + YarnVersionInfo.getDate()).
_("Hadoop version:", VersionInfo.getBuildVersion() +
" on " + VersionInfo.getDate());
render(InfoPage.class);
render(AboutPage.class);
}
public void app() {
String aid = $(APP_ID);
if (aid.isEmpty()) {
setStatus(response().SC_BAD_REQUEST);
setStatus(HttpServletResponse.SC_BAD_REQUEST);
setTitle("Bad request: requires application ID");
return;
}
@ -77,7 +67,7 @@ public class RmController extends Controller {
RMApp app = context.getRMApps().get(appID);
if (app == null) {
// TODO: handle redirect to jobhistory server
setStatus(response().SC_NOT_FOUND);
setStatus(HttpServletResponse.SC_NOT_FOUND);
setTitle("Application not found: "+ aid);
return;
}
@ -107,7 +97,7 @@ public class RmController extends Controller {
} else {
info._("AM container logs:", "AM not yet registered with RM");
}
render(InfoPage.class);
render(AboutPage.class);
}
public void nodes() {

View File

@ -52,7 +52,7 @@ public class RmView extends TwoColumnLayout {
@Override
protected Class<? extends SubView> content() {
return AppsBlock.class;
return AppsBlockWithMetrics.class;
}
private String appsTableInit() {
@ -60,7 +60,7 @@ public class RmView extends TwoColumnLayout {
// id, user, name, queue, state, progress, ui, note
StringBuilder init = tableInit().
append(", aoColumns:[{sType:'title-numeric'}, null, null, null, null,").
append("{sType:'title-numeric', bSearchable:false}, null, null]");
append("null,{sType:'title-numeric', bSearchable:false}, null, null]");
String rows = $("rowlimit");
int rowLimit = rows.isEmpty() ? MAX_DISPLAY_ROWS : Integer.parseInt(rows);
if (list.apps.size() < rowLimit) {

View File

@ -17,14 +17,20 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import java.io.IOException;
import java.io.PrintWriter;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodesPage.NodesBlock;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.inject.Binder;
import com.google.inject.Injector;
import com.google.inject.Module;
/**
* This tests the NodesPage block table that it should contain the table body
* data for all the columns in the table as specified in the header.
@ -33,23 +39,36 @@ public class TestNodesPage {
@Test
public void testNodesBlockRender() throws Exception {
int numberOfRacks = 2;
int numberOfNodesPerRack = 2;
final int numberOfRacks = 2;
final int numberOfNodesPerRack = 2;
// Number of Actual Table Headers for NodesPage.NodesBlock might change in
// future. In that case this value should be adjusted to the new value.
int numberOfActualTableHeaders = 7;
final int numberOfThInMetricsTable = 9;
final int numberOfActualTableHeaders = 10;
PrintWriter writer = WebAppTests.testBlock(
NodesBlock.class,
RMContext.class,
TestRMWebApp.mockRMContext(3, numberOfRacks, numberOfNodesPerRack,
8 * TestRMWebApp.GiB)).getInstance(PrintWriter.class);
Injector injector = WebAppTests.createMockInjector(RMContext.class,
TestRMWebApp.mockRMContext(3, numberOfRacks, numberOfNodesPerRack, 8*TestRMWebApp.GiB),
new Module() {
@Override
public void configure(Binder binder) {
try {
binder.bind(ResourceManager.class).toInstance(TestRMWebApp.mockRm(3,
numberOfRacks, numberOfNodesPerRack, 8*TestRMWebApp.GiB));
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
});
injector.getInstance(NodesBlock.class).render();
PrintWriter writer = injector.getInstance(PrintWriter.class);
WebAppTests.flushOutput(injector);
Mockito.verify(writer, Mockito.times(numberOfActualTableHeaders)).print(
Mockito.verify(writer, Mockito.times(numberOfActualTableHeaders +
numberOfThInMetricsTable)).print(
"<th");
Mockito.verify(
writer,
Mockito.times(numberOfRacks * numberOfNodesPerRack
* numberOfActualTableHeaders)).print("<td");
* numberOfActualTableHeaders + numberOfThInMetricsTable)).print("<td");
}
}

View File

@ -24,10 +24,10 @@ import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
@ -37,7 +37,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.MockAsm;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
@ -48,7 +47,9 @@ import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Test;
import com.google.common.collect.Maps;
import com.google.inject.Binder;
import com.google.inject.Injector;
import com.google.inject.Module;
public class TestRMWebApp {
static final int GiB = 1024; // MiB
@ -62,14 +63,36 @@ public class TestRMWebApp {
@Test public void testView() {
Injector injector = WebAppTests.createMockInjector(RMContext.class,
mockRMContext(3, 1, 2, 8*GiB));
mockRMContext(3, 1, 2, 8*GiB),
new Module() {
@Override
public void configure(Binder binder) {
try {
binder.bind(ResourceManager.class).toInstance(mockRm(3, 1, 2, 8*GiB));
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
});
injector.getInstance(RmView.class).render();
WebAppTests.flushOutput(injector);
}
@Test public void testNodesPage() {
WebAppTests.testPage(NodesPage.class, RMContext.class,
mockRMContext(3, 1, 2, 8*GiB));
Injector injector = WebAppTests.createMockInjector(RMContext.class,
mockRMContext(3, 1, 2, 8*GiB),
new Module() {
@Override
public void configure(Binder binder) {
try {
binder.bind(ResourceManager.class).toInstance(mockRm(3, 1, 2, 8*GiB));
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
});
injector.getInstance(NodesPage.class).render();
WebAppTests.flushOutput(injector);
}
public static RMContext mockRMContext(int numApps, int racks, int numNodes,
@ -99,8 +122,7 @@ public class TestRMWebApp {
}
public static ResourceManager mockRm(int apps, int racks, int nodes,
int mbsPerNode)
throws Exception {
int mbsPerNode) throws IOException {
ResourceManager rm = mock(ResourceManager.class);
RMContext rmContext = mockRMContext(apps, racks, nodes,
mbsPerNode);
@ -110,7 +132,7 @@ public class TestRMWebApp {
return rm;
}
public static CapacityScheduler mockCapacityScheduler() throws Exception {
public static CapacityScheduler mockCapacityScheduler() throws IOException {
// stolen from TestCapacityScheduler
CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
setupQueueConfiguration(conf);