From 1e347631817d882353bfb91d68f109cb8232e8c4 Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Tue, 28 Jun 2016 16:49:39 +0530
Subject: [PATCH] HDFS-10440. Improve DataNode web UI (Contributed by Weiwei Yang)

(cherry picked from commit 2a0082c51da7cbe2770eddb5f72cd7f8d72fa5f6)
---
 .../hdfs/server/datanode/BPServiceActor.java  |  46 +++++++
 .../hadoop/hdfs/server/datanode/DataNode.java |  26 ++++
 .../hdfs/server/datanode/DataNodeMXBean.java  |  20 ++-
 .../src/main/webapps/datanode/datanode.html   | 129 ++++++++++++++++++
 .../src/main/webapps/datanode/dn.js           |  70 ++++++++++
 .../src/main/webapps/datanode/index.html      |  48 +------
 .../server/datanode/TestDataNodeMXBean.java   |   4 +
 7 files changed, 297 insertions(+), 46 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 99874dd0308..70004e0e72f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionUtil;
@@ -138,6 +140,10 @@ class BPServiceActor implements Runnable {
         || runningState == BPServiceActor.RunningState.CONNECTING;
   }
 
+  String getRunningState() {
+    return runningState.toString();
+  }
+
   @Override
   public String toString() {
     return bpos.toString() + " service to " + nnAddr;
@@ -147,6 +153,22 @@ class BPServiceActor implements Runnable {
     return nnAddr;
   }
 
+  private String getNameNodeAddress() {
+    return NetUtils.getHostPortString(getNNSocketAddress());
+  }
+
+  Map<String, String> getActorInfoMap() {
+    final Map<String, String> info = new HashMap<String, String>();
+    info.put("NamenodeAddress", getNameNodeAddress());
+    info.put("BlockPoolID", bpos.getBlockPoolId());
+    info.put("ActorState", getRunningState());
+    info.put("LastHeartbeat",
+        String.valueOf(getScheduler().getLastHearbeatTime()));
+    info.put("LastBlockReport",
+        String.valueOf(getScheduler().getLastBlockReportTime()));
+    return info;
+  }
+
   private final CountDownLatch initialRegistrationComplete;
   private final LifelineSender lifelineSender;
 
@@ -379,6 +401,7 @@ class BPServiceActor implements Runnable {
               (nCmds + " commands: " + Joiner.on("; ").join(cmds)))) +
           ".");
     }
+    scheduler.updateLastBlockReportTime(monotonicNow());
     scheduler.scheduleNextBlockReport();
     return cmds.size() == 0 ? null : cmds;
   }
@@ -425,6 +448,7 @@ class BPServiceActor implements Runnable {
           " storage reports from service actor: " + this);
     }
 
+    scheduler.updateLastHeartbeatTime(monotonicNow());
     VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
         .getVolumeFailureSummary();
     int numFailedVolumes = volumeFailureSummary != null ?
@@ -995,6 +1019,12 @@ class BPServiceActor implements Runnable {
     @VisibleForTesting
     volatile long nextLifelineTime = monotonicNow();
 
+    @VisibleForTesting
+    volatile long lastBlockReportTime = monotonicNow();
+
+    @VisibleForTesting
+    volatile long lastHeartbeatTime = monotonicNow();
+
     @VisibleForTesting
     boolean resetBlockReportTime = true;
 
@@ -1033,6 +1063,22 @@ class BPServiceActor implements Runnable {
       return nextHeartbeatTime;
     }
 
+    void updateLastHeartbeatTime(long heartbeatTime) {
+      lastHeartbeatTime = heartbeatTime;
+    }
+
+    void updateLastBlockReportTime(long blockReportTime) {
+      lastBlockReportTime = blockReportTime;
+    }
+
+    long getLastHearbeatTime() {
+      return (monotonicNow() - lastHeartbeatTime)/1000;
+    }
+
+    long getLastBlockReportTime() {
+      return (monotonicNow() - lastBlockReportTime)/1000;
+    }
+
     long scheduleNextLifeline(long baseTime) {
       // Numerical overflow is possible here and is okay.
       nextLifelineTime = baseTime + lifelineIntervalMs;
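Taken together, the Scheduler changes boil down to one pattern: stamp a monotonic clock whenever a heartbeat or block report is sent, and compute the age in whole seconds on demand. A minimal, self-contained sketch of that pattern (illustrative only, not the patched class; it uses the JDK clock where Hadoop uses Time.monotonicNow(), and the class and method names here are hypothetical):

    // Record when the last heartbeat happened; report its age in seconds.
    public class HeartbeatAge {
      // Monotonic milliseconds; immune to wall-clock adjustments, which is
      // why the patch uses monotonicNow() rather than currentTimeMillis().
      private static long monotonicNow() {
        return System.nanoTime() / 1_000_000L;
      }

      // volatile: written by the actor thread, read by the JMX/web UI thread.
      private volatile long lastHeartbeatTime = monotonicNow();

      void updateLastHeartbeatTime(long heartbeatTime) {
        lastHeartbeatTime = heartbeatTime;
      }

      // Age of the last heartbeat in whole seconds (ms / 1000), which is
      // what ends up in the "LastHeartbeat" entry of the actor info map.
      long lastHeartbeatAgeSeconds() {
        return (monotonicNow() - lastHeartbeatTime) / 1000;
      }

      public static void main(String[] args) throws InterruptedException {
        HeartbeatAge age = new HeartbeatAge();
        age.updateLastHeartbeatTime(monotonicNow());
        Thread.sleep(2500);
        System.out.println(age.lastHeartbeatAgeSeconds()); // prints 2
      }
    }

Note that the getters return an elapsed time, not a timestamp: "LastHeartbeat" and "LastBlockReport" are already ages in seconds by the time they reach JMX.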
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1cc01cba5f3..e7b72f777b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2844,6 +2844,13 @@ public class DataNode extends ReconfigurableBase
     return Integer.toString(ipcAddr.getPort());
   }
 
+  @Override // DataNodeMXBean
+  public String getDataPort(){
+    InetSocketAddress dataAddr = NetUtils.createSocketAddr(
+        this.getConf().get(DFS_DATANODE_ADDRESS_KEY));
+    return Integer.toString(dataAddr.getPort());
+  }
+
   @Override // DataNodeMXBean
   public String getHttpPort(){
     return this.getConf().get("dfs.datanode.info.port");
@@ -2883,6 +2890,25 @@ public class DataNode extends ReconfigurableBase
     return JSON.toString(info);
   }
 
+  /**
+   * Returned information is a JSON representation of an array;
+   * each element of the array is a map that contains the information
+   * about a block pool service actor.
+   */
+  @Override // DataNodeMXBean
+  public String getBPServiceActorInfo() {
+    final ArrayList<Map<String, String>> infoArray =
+        new ArrayList<Map<String, String>>();
+    for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
+      if (bpos != null) {
+        for (BPServiceActor actor : bpos.getBPServiceActors()) {
+          infoArray.add(actor.getActorInfoMap());
+        }
+      }
+    }
+    return JSON.toString(infoArray);
+  }
+
   /**
    * Returned information is a JSON representation of a map with
    * volume name as the key and value is a map of volume attribute
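With the keys populated by getActorInfoMap(), the new BPServiceActorInfo attribute serializes to a JSON array with one object per actor. For example, on a DataNode serving an HA pair it would look roughly like this (hostnames, pool ID and timings below are made up; note that every value is a string, since the map is a Map<String, String>):

    [{"NamenodeAddress":"nn1.example.com:8020",
      "BlockPoolID":"BP-713269924-10.0.0.1-1466500000000",
      "ActorState":"RUNNING",
      "LastHeartbeat":"1",
      "LastBlockReport":"21594"},
     {"NamenodeAddress":"nn2.example.com:8020",
      "BlockPoolID":"BP-713269924-10.0.0.1-1466500000000",
      "ActorState":"RUNNING",
      "LastHeartbeat":"2",
      "LastBlockReport":"21600"}]

A federated or HA DataNode runs one BPServiceActor per NameNode, which is why the result is an array rather than a single map.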
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index 92abd886fee..6b5428bbe99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -50,14 +50,28 @@ public interface DataNodeMXBean {
    * @return the http port
    */
   public String getHttpPort();
-
+
   /**
-   * Gets the namenode IP addresses
+   * Gets the data port.
+   *
+   * @return the data port
+   */
+  String getDataPort();
+
+  /**
+   * Gets the namenode IP addresses.
    *
    * @return the namenode IP addresses that the datanode is talking to
    */
   public String getNamenodeAddresses();
-
+
+  /**
+   * Gets information of the block pool service actors.
+   *
+   * @return block pool service actors info
+   */
+  String getBPServiceActorInfo();
+
   /**
    * Gets the information of each volume on the Datanode. Please
    * see the implementation for the format of returned information.
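Both new interface methods follow the convention of the existing getNamenodeAddresses() and getVolumeInfo() attributes: they return a pre-serialized JSON String rather than a typed open-MBean structure, so the values pass through the DataNode's /jmx servlet unchanged and the web UI decodes them client-side (see the JSON.parse calls in dn.js below).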
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
new file mode 100644
index 00000000000..22a2733dff4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -0,0 +1,129 @@
+[The 129 added lines of HTML markup were lost when this patch was flattened
+ to text; only fragments survive. Recoverable structure: the Apache license
+ header; a page titled "DataNode Information"; an alert panel (#alert-panel
+ with #alert-panel-body); a #tab-overview container rendered from the
+ #tmpl-dn dust template; the footer "Hadoop, {release-year-token}."; and
+ script tags pulling in jquery, dust, moment and the new dn.js.]
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js
new file mode 100644
index 00000000000..ea963cc37aa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+  "use strict";
+
+  var data = {};
+
+  dust.loadSource(dust.compile($('#tmpl-dn').html(), 'dn'));
+
+  function load() {
+    $.get('/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo', function(resp) {
+      data.dn = workaround(resp.beans[0]);
+      data.dn.HostName=window.location.hostname;
+      render();
+    }).fail(show_err_msg);
+  }
+
+  function workaround(dn) {
+    function node_map_to_array(nodes) {
+      var res = [];
+      for (var n in nodes) {
+        var p = nodes[n];
+        p.name = n;
+        res.push(p);
+      }
+      return res;
+    }
+
+    dn.VolumeInfo = node_map_to_array(JSON.parse(dn.VolumeInfo));
+    dn.BPServiceActorInfo = JSON.parse(dn.BPServiceActorInfo);
+
+    return dn;
+  }
+
+  function render() {
+    var base = dust.makeBase({
+      'helper_relative_time' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        return chunk.write(moment().subtract(Number(value), 'seconds').fromNow(true));
+      }
+    });
+    dust.render('dn', base.push(data), function(err, out) {
+      $('#tab-overview').html(out);
+      $('#tab-overview').addClass('active');
+    });
+  }
+
+  function show_err_msg() {
+    $('#alert-panel-body').html("Failed to load datanode information");
+    $('#alert-panel').show();
+  }
+
+  load();
+
+})();
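Everything on the new page is rendered client-side: load() fetches the DataNodeInfo bean from the /jmx servlet, workaround() re-shapes the two string-encoded attributes (VolumeInfo into a name-keyed array that dust can iterate, BPServiceActorInfo via a plain JSON.parse), and the helper_relative_time dust helper turns the second counts into human-readable ages; moment().subtract(21600, 'seconds').fromNow(true) renders as "6 hours", for instance. A failed fetch surfaces through #alert-panel instead of leaving a blank page.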
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
index a88bc9be582..fee51be2341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
@@ -1,5 +1,3 @@
[The HTML hunks were lost when this patch was flattened to text; only
 fragments survive. The 46 deleted lines are the old single-page UI titled
 "DataNode Information", including its markup, script includes and the
 footer "Hadoop, {release-year-token}.". What remains is a minimal stub
 titled "Hadoop Administration" that forwards to the new datanode.html.]
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 9f5a4715c4c..24fe336e3de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -78,6 +78,10 @@ public class TestDataNodeMXBean {
       int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
           "XceiverCount");
       Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
+
+      String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
+          "BPServiceActorInfo");
+      Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
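The test reads the new attribute the same way any in-process JMX client would. A standalone sketch of that consumption path (a hypothetical helper, not part of the patch; it must run inside the DataNode JVM, or be pointed at it through a remote JMX connector, and the object name matches the query dn.js issues against /jmx):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class PrintBpActorInfo {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName dnInfo =
            new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        // BPServiceActorInfo is a String holding a JSON array (one entry
        // per BPServiceActor), not a CompositeData structure, so callers
        // parse it themselves.
        String json = (String) mbs.getAttribute(dnInfo, "BPServiceActorInfo");
        System.out.println(json);
      }
    }

Because the attribute is compared against datanode.getBPServiceActorInfo() verbatim, the test also pins down the serialization format the web UI depends on.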