HDFS-6722. Merge r1616669 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1616670 13f79535-47bb-0310-9956-ffa450edef68
parent 509ed4400a
commit 061a26631f
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -118,6 +118,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
     Xu via atm)
 
+    HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
+    (Ming Ma via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html

@@ -282,7 +282,7 @@
 {#DeadNodes}
 <tr class="danger">
   <td>{name} ({xferaddr})</td>
-  <td>{lastContact}</td>
+  <td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>
   <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
   <td>-</td>
   <td>-</td>
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js

@@ -139,6 +139,14 @@
 }
 
 function load_datanode_info() {
+
+  var HELPERS = {
+    'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
+      var value = dust.helpers.tap(params.value, chunk, ctx);
+      return chunk.write('' + new Date(Date.now()-1000*Number(value)));
+    }
+  };
+
   function workaround(r) {
     function node_map_to_array(nodes) {
       var res = [];
@@ -160,7 +168,8 @@
       '/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
       guard_with_startup_progress(function (resp) {
         var data = workaround(resp.beans[0]);
-        dust.render('datanode-info', data, function(err, out) {
+        var base = dust.makeBase(HELPERS);
+        dust.render('datanode-info', base.push(data), function(err, out) {
           $('#tab-datanode').html(out);
           $('#ui-tabs a[href="#tab-datanode"]').tab('show');
         });
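The two dfshealth.js hunks above are the heart of the change: the NameNode's JMX payload reports a dead node's lastContact as the number of seconds since its last heartbeat, the new dust helper converts that relative figure into an absolute wall-clock time, and dust.makeBase(HELPERS) makes the helper visible to the datanode-info template at render time. A minimal Java sketch of the same arithmetic (the class name is hypothetical, not part of the patch):

import java.util.Date;

// Hypothetical illustration class, not part of the patch.
public class LastContactFormatter {
  // Mirrors the helper's expression Date.now() - 1000 * value, where value
  // is the number of seconds since the NameNode last heard from the node.
  public static Date lastContactToDate(long lastContactSeconds) {
    return new Date(System.currentTimeMillis() - 1000L * lastContactSeconds);
  }

  public static void main(String[] args) {
    // A node last heard from two minutes ago prints an absolute timestamp
    // such as "Mon Aug 04 ..." instead of the bare figure "120".
    System.out.println(lastContactToDate(120));
  }
}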
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -30,9 +30,13 @@ import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.util.VersionInfo;
@@ -58,11 +62,14 @@ public class TestNameNodeMXBean {
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+      NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNameNode().namesystem;
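The two conf.setInt calls added above shrink dead-node detection time so the test can observe a dead node promptly. A sketch of the expiry window, assuming the formula DatanodeManager uses (2 * recheck interval in ms + 10 * 1000 * heartbeat interval in s); the class is hypothetical:

// Hypothetical helper showing the dead-node expiry window; with the test's
// settings (recheck = 1 ms, heartbeat = 1 s) a stopped DataNode is declared
// dead after roughly 10 seconds instead of the default ~10.5 minutes.
public class HeartbeatExpiry {
  static long expiryMs(long recheckMs, long heartbeatSec) {
    return 2 * recheckMs + 10 * 1000 * heartbeatSec;
  }

  public static void main(String[] args) {
    System.out.println(expiryMs(1, 1));             // test values: 10002 ms
    System.out.println(expiryMs(5 * 60 * 1000, 3)); // defaults: 630000 ms
  }
}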
@@ -70,6 +77,29 @@ public class TestNameNodeMXBean {
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanName = new ObjectName(
           "Hadoop:service=NameNode,name=NameNodeInfo");
+
+      // Define include file to generate deadNodes metrics
+      FileSystem localFileSys = FileSystem.getLocal(conf);
+      Path workingDir = localFileSys.getWorkingDirectory();
+      Path dir = new Path(workingDir,
+          "build/test/data/temp/TestNameNodeMXBean");
+      Path includeFile = new Path(dir, "include");
+      assertTrue(localFileSys.mkdirs(dir));
+      StringBuilder includeHosts = new StringBuilder();
+      for(DataNode dn : cluster.getDataNodes()) {
+        includeHosts.append(dn.getDisplayName()).append("\n");
+      }
+      DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+      conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+      fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+
+      cluster.stopDataNode(0);
+      while (fsn.getNumDatanodesInService() != 2) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {}
+      }
+
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       assertEquals(fsn.getClusterId(), clusterId);
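The hunk above wires all three DataNodes into a dfs.hosts include file (so the NameNode keeps reporting a stopped node as dead rather than forgetting it), stops one, and spins until only two remain in service. The unbounded sleep loop works but can hang a failing run; a bounded alternative using Hadoop's GenericTestUtils.waitFor is sketched below (a refactor suggestion, not part of the patch; the helper class is hypothetical):

import java.util.concurrent.TimeoutException;

import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.test.GenericTestUtils;

// Hypothetical helper: polls once a second and gives up after 60 s instead
// of looping forever if the stopped node is never marked dead.
class DeadNodeWait {
  static void waitForInService(final FSNamesystem fsn, final int expected)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return fsn.getNumDatanodesInService() == expected;
      }
    }, 1000, 60000);
  }
}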
@@ -121,6 +151,15 @@ public class TestNameNodeMXBean {
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "DeadNodes"));
       assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+      Map<String, Map<String, Object>> deadNodes =
+          (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
+      assertTrue(deadNodes.size() > 0);
+      for (Map<String, Object> deadNode : deadNodes.values()) {
+        assertTrue(deadNode.containsKey("lastContact"));
+        assertTrue(deadNode.containsKey("decommissioned"));
+        assertTrue(deadNode.containsKey("xferaddr"));
+      }
+
       // get attribute NodeUsage
       String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
           "NodeUsage"));
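The new assertions pin down the JSON contract the web UI depends on: each entry of the DeadNodes attribute must carry exactly the fields the dfshealth template reads ({lastContact}, {?decommissioned}, {xferaddr}). A minimal sketch of consuming that attribute the same way the test does, from inside the NameNode's JVM (class name hypothetical; printed output illustrative):

import java.lang.management.ManagementFactory;
import java.util.Map;

import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.mortbay.util.ajax.JSON;

public class DeadNodesReader {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    // Same in-process MBean lookup the test performs.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String json = (String) mbs.getAttribute(name, "DeadNodes");
    // The attribute is a JSON map keyed by node name, one entry per dead node.
    Map<String, Map<String, Object>> deadNodes =
        (Map<String, Map<String, Object>>) JSON.parse(json);
    for (Map.Entry<String, Map<String, Object>> e : deadNodes.entrySet()) {
      System.out.println(e.getKey() + ": last contact "
          + e.getValue().get("lastContact") + "s ago");
    }
  }
}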
@@ -181,7 +220,7 @@ public class TestNameNodeMXBean {
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
           cluster.getDataNodes().size(),
           mbs.getAttribute(mxbeanName, "CacheCapacity"));
     } finally {