HDFS-4860. Add additional attributes to JMX beans. Contributed by Trevor Lorimer

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1500140 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Boudnik 2013-07-05 21:40:46 +00:00
parent c478992e4c
commit e9b658b021
6 changed files with 167 additions and 0 deletions

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java

@@ -101,8 +101,10 @@ public class JvmMetrics implements MetricsSource {
    Runtime runtime = Runtime.getRuntime();
    rb.addGauge(MemNonHeapUsedM, memNonHeap.getUsed() / M)
      .addGauge(MemNonHeapCommittedM, memNonHeap.getCommitted() / M)
      .addGauge(MemNonHeapMaxM, memNonHeap.getMax() / M)
      .addGauge(MemHeapUsedM, memHeap.getUsed() / M)
      .addGauge(MemHeapCommittedM, memHeap.getCommitted() / M)
      .addGauge(MemHeapMaxM, memHeap.getMax() / M)
      .addGauge(MemMaxM, runtime.maxMemory() / M);
  }
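For context: memNonHeap and memHeap here are the java.lang.management MemoryUsage values that JvmMetrics reads from the JVM's MemoryMXBean, so the two new gauges (MemNonHeapMaxM, MemHeapMaxM) can be reproduced outside Hadoop. A minimal standalone sketch, with the hypothetical class name MemMaxProbe:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;

public class MemMaxProbe {
  public static void main(String[] args) {
    final int M = 1024 * 1024;  // same MB divisor JvmMetrics uses
    MemoryMXBean bean = ManagementFactory.getMemoryMXBean();
    MemoryUsage heap = bean.getHeapMemoryUsage();
    MemoryUsage nonHeap = bean.getNonHeapMemoryUsage();
    // getMax() returns -1 when no limit is defined, so the MB figure
    // can be negative; the gauges report the raw value divided by M.
    System.out.println("MemHeapMaxM=" + heap.getMax() / M);
    System.out.println("MemNonHeapMaxM=" + nonHeap.getMax() / M);
    System.out.println("MemMaxM=" + Runtime.getRuntime().maxMemory() / M);
  }
}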

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java

@@ -32,8 +32,10 @@ public enum JvmMetricsInfo implements MetricsInfo {
  // metrics
  MemNonHeapUsedM("Non-heap memory used in MB"),
  MemNonHeapCommittedM("Non-heap memory committed in MB"),
  MemNonHeapMaxM("Non-heap memory max in MB"),
  MemHeapUsedM("Heap memory used in MB"),
  MemHeapCommittedM("Heap memory committed in MB"),
  MemHeapMaxM("Heap memory max in MB"),
  MemMaxM("Max memory size in MB"),
  GcCount("Total GC count"),
  GcTimeMillis("Total GC time in milliseconds"),
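Each enum constant supplies both the metric name and its description through the MetricsInfo contract, so the two new entries surface with these strings wherever the metric is published. A minimal sketch of what a consumer sees (hypothetical class name InfoCheck; assumes hadoop-common on the classpath):

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.source.JvmMetricsInfo;

public class InfoCheck {
  public static void main(String[] args) {
    MetricsInfo info = JvmMetricsInfo.MemHeapMaxM;
    // name() is the enum constant itself; description() is the string
    // passed to the constructor above.
    System.out.println(info.name());        // MemHeapMaxM
    System.out.println(info.description()); // Heap memory max in MB
  }
}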

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -171,6 +171,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
@@ -210,6 +211,7 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
@@ -4975,6 +4977,28 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return getBlockManager().getDatanodeManager().getNumDeadDataNodes();
  }

  @Override // FSNamesystemMBean
  public int getNumDecomLiveDataNodes() {
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
    int liveDecommissioned = 0;
    for (DatanodeDescriptor node : live) {
      liveDecommissioned += node.isDecommissioned() ? 1 : 0;
    }
    return liveDecommissioned;
  }

  @Override // FSNamesystemMBean
  public int getNumDecomDeadDataNodes() {
    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
    // Fetch the dead datanodes (second argument of fetchDatanodes), and
    // keep decommissioned nodes in the list so they can be counted.
    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false);
    int deadDecommissioned = 0;
    for (DatanodeDescriptor node : dead) {
      deadDecommissioned += node.isDecommissioned() ? 1 : 0;
    }
    return deadDecommissioned;
  }
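Once the NameNode registers its FSNamesystemMBean, the two new counters are readable over JMX. A hypothetical standalone probe, assuming Hadoop's standard object name Hadoop:service=NameNode,name=FSNamesystemState and an MBean server that can see the NameNode (e.g. the probe runs in the NameNode's JVM or over a JMX connector):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class DecomCountProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // The NameNode registers FSNamesystemMBean under this name.
    ObjectName fsState =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    System.out.println("decommissioned live: "
        + mbs.getAttribute(fsState, "NumDecomLiveDataNodes"));
    System.out.println("decommissioned dead: "
        + mbs.getAttribute(fsState, "NumDecomDeadDataNodes"));
  }
}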
  @Override // FSNamesystemMBean
  @Metric({"StaleDataNodes",
      "Number of datanodes marked stale due to delayed heartbeat"})
@@ -5787,6 +5811,91 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return JSON.toString(statusMap);
  }

  @Override // NameNodeMXBean
  public String getNodeUsage() {
    float median = 0;
    float max = 0;
    float min = 0;
    float dev = 0;

    final Map<String, Map<String, Object>> info =
        new HashMap<String, Map<String, Object>>();
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);

    if (live.size() > 0) {
      float totalDfsUsed = 0;
      float[] usages = new float[live.size()];
      int i = 0;
      for (DatanodeDescriptor dn : live) {
        usages[i++] = dn.getDfsUsedPercent();
        totalDfsUsed += dn.getDfsUsedPercent();
      }
      // After this division, totalDfsUsed holds the mean usage.
      totalDfsUsed /= live.size();

      Arrays.sort(usages);
      median = usages[usages.length / 2];
      max = usages[usages.length - 1];
      min = usages[0];

      for (i = 0; i < usages.length; i++) {
        dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
      }
      dev = (float) Math.sqrt(dev / usages.length);
    }

    final Map<String, Object> innerInfo = new HashMap<String, Object>();
    innerInfo.put("min", StringUtils.format("%.2f%%", min));
    innerInfo.put("median", StringUtils.format("%.2f%%", median));
    innerInfo.put("max", StringUtils.format("%.2f%%", max));
    innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev));
    info.put("nodeUsage", innerInfo);

    return JSON.toString(info);
  }
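A worked example with hypothetical inputs: three live nodes at 10%, 20% and 60% DFS usage give a mean of 30%, a median of usages[3 / 2] = usages[1] = 20% (for even counts this picks the upper-middle element), and a standard deviation of sqrt((400 + 100 + 900) / 3) ≈ 21.60, so the method would return (key order depends on the HashMap):

{"nodeUsage":{"min":"10.00%","median":"20.00%","max":"60.00%","stdDev":"21.60%"}}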
  @Override // NameNodeMXBean
  public String getNameJournalStatus() {
    List<Map<String, String>> jasList = new ArrayList<Map<String, String>>();
    FSEditLog log = getFSImage().getEditLog();
    if (log != null) {
      boolean openForWrite = log.isOpenForWrite();
      for (JournalAndStream jas : log.getJournals()) {
        final Map<String, String> jasMap = new HashMap<String, String>();
        String manager = jas.getManager().toString();

        jasMap.put("required", String.valueOf(jas.isRequired()));
        jasMap.put("disabled", String.valueOf(jas.isDisabled()));
        jasMap.put("manager", manager);

        if (jas.isDisabled()) {
          jasMap.put("stream", "Failed");
        } else if (openForWrite) {
          EditLogOutputStream elos = jas.getCurrentStream();
          if (elos != null) {
            jasMap.put("stream", elos.generateHtmlReport());
          } else {
            jasMap.put("stream", "not currently writing");
          }
        } else {
          jasMap.put("stream", "open for read");
        }
        jasList.add(jasMap);
      }
    }
    return JSON.toString(jasList);
  }
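A hypothetical reader for the new attribute, using the same Hadoop:service=NameNode,name=NameNodeInfo object name that TestNameNodeMXBean queries below; each element of the returned JSON array carries the required, disabled, manager and stream keys set above:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JournalStatusProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName nn =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    // One JSON object per journal, e.g.
    // {"required":"false","disabled":"false","manager":...,"stream":...}
    String journals = (String) mbs.getAttribute(nn, "NameJournalStatus");
    System.out.println(journals);
  }
}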
  @Override // NameNodeMXBean
  public String getNNStarted() {
    return getStartTime().toString();
  }

  @Override // NameNodeMXBean
  public String getCompileInfo() {
    return VersionInfo.getDate() + " by " + VersionInfo.getUser() +
        " from " + VersionInfo.getBranch();
  }

  /** @return the block manager. */
  public BlockManager getBlockManager() {
    return blockManager;
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java

@@ -174,4 +174,32 @@ public interface NameNodeMXBean {
   * @return the name dir status information, as a JSON string.
   */
  public String getNameDirStatuses();

  /**
   * Get the max, median, min and standard deviation of DataNode usage.
   *
   * @return the DataNode usage information, as a JSON string.
   */
  public String getNodeUsage();

  /**
   * Get status information about the journals of the NN.
   *
   * @return the name journal status information, as a JSON string.
   */
  public String getNameJournalStatus();

  /**
   * Get the NN start time.
   *
   * @return the NN start time
   */
  public String getNNStarted();

  /**
   * Get the compilation information, which contains the date, user and
   * branch.
   *
   * @return the compilation information, as a string.
   */
  public String getCompileInfo();
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java

@@ -118,4 +118,16 @@ public interface FSNamesystemMBean {
   * @return number of stale data nodes
   */
  public int getNumStaleDataNodes();

  /**
   * Number of decommissioned live data nodes.
   *
   * @return number of decommissioned live data nodes
   */
  public int getNumDecomLiveDataNodes();

  /**
   * Number of decommissioned dead data nodes.
   *
   * @return number of decommissioned dead data nodes
   */
  public int getNumDecomDeadDataNodes();
}

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -112,6 +112,20 @@ public class TestNameNodeMXBean {
    String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
        "DeadNodes"));
    assertEquals(fsn.getDeadNodes(), deadnodeinfo);
    // get attribute NodeUsage
    String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
        "NodeUsage"));
    assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
    // get attribute NameJournalStatus
    String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
        "NameJournalStatus"));
    assertEquals("Bad value for NameJournalStatus",
        fsn.getNameJournalStatus(), nameJournalStatus);
    // get attribute "NNStarted"
    String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted");
    assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted);
    // get attribute "CompileInfo"
    String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
    assertEquals("Bad value for CompileInfo",
        fsn.getCompileInfo(), compileInfo);
    // get attribute NameDirStatuses
    String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
        "NameDirStatuses"));