MAPREDUCE-3703. ResourceManager should provide node lists in JMX output. (Eric Payne via mahadev) - Merge r1237920 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1237921 13f79535-47bb-0310-9956-ffa450edef68
Author: Mahadev Konar
Date: 2012-01-30 18:51:03 +00:00
Parent: 4954e376d0
Commit: fc432d7f33
5 changed files with 263 additions and 0 deletions
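The new RMNMInfo bean exposes the status of all live NodeManagers as a JSON string through JMX. As a rough sketch of how a client might read it, the snippet below connects to a remote JMX endpoint and fetches the LiveNodeManagers attribute; the host, the port, and the "Hadoop:service=ResourceManager,name=RMNMInfo" object name are assumptions based on Hadoop's usual MBeans.register naming convention, not part of this change.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class LiveNodeManagersClient {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint: assumes the ResourceManager JVM was started with
    // remote JMX enabled (e.g. -Dcom.sun.management.jmxremote.port=8999).
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://rm-host:8999/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbsc = connector.getMBeanServerConnection();
      // MBeans.register("ResourceManager", "RMNMInfo", bean) publishes the bean
      // under the Hadoop domain; this ObjectName assumes that convention.
      ObjectName name =
          new ObjectName("Hadoop:service=ResourceManager,name=RMNMInfo");
      // getLiveNodeManagers() surfaces as the "LiveNodeManagers" attribute.
      String liveNMs = (String) mbsc.getAttribute(name, "LiveNodeManagers");
      System.out.println(liveNMs);
    } finally {
      connector.close();
    }
  }
}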


@@ -563,6 +563,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3742. "yarn logs" command fails with ClassNotFoundException.
(Jason Lowe via mahadev)
MAPREDUCE-3703. ResourceManager should provide node lists in JMX output.
(Eric Payne via mahadev)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES


@@ -0,0 +1,129 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestRMNMInfo {
private static final Log LOG = LogFactory.getLog(TestRMNMInfo.class);
private static final int NUMNODEMANAGERS = 4;
protected static MiniMRYarnCluster mrCluster;
private static Configuration initialConf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(initialConf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static Path TEST_ROOT_DIR =
new Path("target",TestRMNMInfo.class.getName() + "-tmpDir")
.makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
@BeforeClass
public static void setup() throws IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestRMNMInfo.class.getName(),
NUMNODEMANAGERS);
Configuration conf = new Configuration();
mrCluster.init(conf);
mrCluster.start();
}
// workaround the absent public distcache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
if (mrCluster != null) {
mrCluster.stop();
mrCluster = null;
}
}
@Test
public void testRMNMInfo() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
RMContext rmc = mrCluster.getResourceManager().getRMContext();
ResourceScheduler rms = mrCluster.getResourceManager()
.getResourceScheduler();
RMNMInfo rmInfo = new RMNMInfo(rmc,rms);
String liveNMs = rmInfo.getLiveNodeManagers();
ObjectMapper mapper = new ObjectMapper();
JsonNode jn = mapper.readTree(liveNMs);
Assert.assertEquals("Unexpected number of live nodes:",
NUMNODEMANAGERS, jn.size());
Iterator<JsonNode> it = jn.iterator();
while (it.hasNext()) {
JsonNode n = it.next();
Assert.assertNotNull(n.get("HostName"));
Assert.assertNotNull(n.get("Rack"));
Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",
n.get("State").getValueAsText().contains("RUNNING"));
Assert.assertNotNull(n.get("NodeHTTPAddress"));
Assert.assertTrue("Node " + n.get("NodeId") + " should be Healthy",
n.get("HealthStatus").getValueAsText().contains("Healthy"));
Assert.assertNotNull(n.get("LastHealthUpdate"));
Assert.assertNotNull(n.get("HealthReport"));
Assert.assertNotNull(n.get("NumContainersMB"));
Assert.assertEquals(
n.get("NodeId") + ": Unexpected number of used containers",
0, n.get("NumContainersMB").getValueAsInt());
Assert.assertEquals(
n.get("NodeId") + ": Unexpected amount of used memory",
0, n.get("UsedMemoryMB").getValueAsInt());
Assert.assertNotNull(n.get("AvailableMemoryMB"));
}
}
}


@@ -0,0 +1,106 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import javax.management.NotCompliantMBeanException;
import javax.management.StandardMBean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.mortbay.util.ajax.JSON;
/**
* JMX bean listing statuses of all node managers.
*/
public class RMNMInfo implements RMNMInfoBeans {
private static final Log LOG = LogFactory.getLog(RMNMInfo.class);
private RMContext rmContext;
private ResourceScheduler scheduler;
/**
* Constructor for RMNMInfo registers the bean with JMX.
*
* @param rmc resource manager's context object
* @param sched resource manager's scheduler object
*/
public RMNMInfo(RMContext rmc, ResourceScheduler sched) {
this.rmContext = rmc;
this.scheduler = sched;
StandardMBean bean;
try {
bean = new StandardMBean(this,RMNMInfoBeans.class);
MBeans.register("ResourceManager", "RMNMInfo", bean);
} catch (NotCompliantMBeanException e) {
LOG.warn("Error registering RMNMInfo MBean", e);
}
LOG.info("Registered RMNMInfo MBean");
}
static class InfoMap extends LinkedHashMap<String, Object> {
private static final long serialVersionUID = 1L;
}
/**
* Implements getLiveNodeManagers()
*
* @return JSON formatted string containing statuses of all node managers
*/
@Override // RMNMInfoBeans
public String getLiveNodeManagers() {
Collection<RMNode> nodes = this.rmContext.getRMNodes().values();
List<InfoMap> nodesInfo = new ArrayList<InfoMap>();
for (final RMNode ni : nodes) {
SchedulerNodeReport report = scheduler.getNodeReport(ni.getNodeID());
InfoMap info = new InfoMap();
info.put("HostName", ni.getHostName());
info.put("Rack", ni.getRackName());
info.put("State", ni.getState().toString());
info.put("NodeId", ni.getNodeID());
info.put("NodeHTTPAddress", ni.getHttpAddress());
info.put("HealthStatus",
ni.getNodeHealthStatus().getIsNodeHealthy() ?
"Healthy" : "Unhealthy");
info.put("LastHealthUpdate",
ni.getNodeHealthStatus().getLastHealthReportTime());
info.put("HealthReport",
ni.getNodeHealthStatus().getHealthReport());
info.put("NumContainersMB", report.getNumContainers());
info.put("UsedMemoryMB", report.getUsedResource().getMemory());
info.put("AvailableMemoryMB",
report.getAvailableResource().getMemory());
nodesInfo.add(info);
}
return JSON.toString(nodesInfo);
}
}


@@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager;
public interface RMNMInfoBeans {
public String getLiveNodeManagers();
}


@@ -207,6 +207,8 @@ public class ResourceManager extends CompositeService implements Recoverable {
addService(applicationMasterLauncher);
new RMNMInfo(this.rmContext, this.scheduler);
super.init(conf);
}