diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42ce130ae7a..3b179032457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -376,6 +376,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
Xu via atm)
+ HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
+ (Ming Ma via wheat9)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 25895261982..02858f18265 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -281,7 +281,7 @@
{#DeadNodes}
{name} ({xferaddr}) |
- {lastContact} |
+ {#helper_lastcontact_tostring value="{lastContact}"/} |
Dead{?decommissioned}, Decommissioned{/decommissioned} |
- |
- |
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index 04d3922b1f2..e63c279b936 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -139,6 +139,14 @@
}
function load_datanode_info() {
+
+ var HELPERS = {
+ 'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
+ var value = dust.helpers.tap(params.value, chunk, ctx);
+ return chunk.write('' + new Date(Date.now()-1000*Number(value)));
+ }
+ };
+
function workaround(r) {
function node_map_to_array(nodes) {
var res = [];
@@ -160,7 +168,8 @@
'/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
guard_with_startup_progress(function (resp) {
var data = workaround(resp.beans[0]);
- dust.render('datanode-info', data, function(err, out) {
+ var base = dust.makeBase(HELPERS);
+ dust.render('datanode-info', base.push(data), function(err, out) {
$('#tab-datanode').html(out);
$('#ui-tabs a[href="#tab-datanode"]').tab('show');
});
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index d459d30dc55..4e078549b44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -30,9 +30,13 @@ import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.util.VersionInfo;
@@ -58,11 +62,14 @@ public class TestNameNodeMXBean {
public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
- NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+ NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+ conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+
MiniDFSCluster cluster = null;
try {
- cluster = new MiniDFSCluster.Builder(conf).build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -70,6 +77,29 @@ public class TestNameNodeMXBean {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
+
+ // Define include file to generate deadNodes metrics
+ FileSystem localFileSys = FileSystem.getLocal(conf);
+ Path workingDir = localFileSys.getWorkingDirectory();
+ Path dir = new Path(workingDir,
+ "build/test/data/temp/TestNameNodeMXBean");
+ Path includeFile = new Path(dir, "include");
+ assertTrue(localFileSys.mkdirs(dir));
+ StringBuilder includeHosts = new StringBuilder();
+ for(DataNode dn : cluster.getDataNodes()) {
+ includeHosts.append(dn.getDisplayName()).append("\n");
+ }
+ DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+ conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+ fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+
+ cluster.stopDataNode(0);
+ while (fsn.getNumDatanodesInService() != 2) {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {}
+ }
+
// get attribute "ClusterId"
String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
assertEquals(fsn.getClusterId(), clusterId);
@@ -121,6 +151,15 @@ public class TestNameNodeMXBean {
String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
"DeadNodes"));
assertEquals(fsn.getDeadNodes(), deadnodeinfo);
Map<String, Map<String, Object>> deadNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
+ assertTrue(deadNodes.size() > 0);
+ for (Map<String, Object> deadNode : deadNodes.values()) {
+ assertTrue(deadNode.containsKey("lastContact"));
+ assertTrue(deadNode.containsKey("decommissioned"));
+ assertTrue(deadNode.containsKey("xferaddr"));
+ }
+
// get attribute NodeUsage
String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
"NodeUsage"));
@@ -181,7 +220,7 @@ public class TestNameNodeMXBean {
assertEquals(1, statusMap.get("active").size());
assertEquals(1, statusMap.get("failed").size());
assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
- assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+ assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
cluster.getDataNodes().size(),
mbs.getAttribute(mxbeanName, "CacheCapacity"));
} finally {