HDFS-11517. Expose slow disks via DataNode JMX. Contributed by Hanisha Koneru

Hanisha Koneru 2017-03-17 15:42:25 -07:00 committed by Arpit Agarwal
parent 4a8e304502
commit 7f8e928400
4 changed files with 65 additions and 2 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -1821,6 +1821,10 @@ public class DataNode extends ReconfigurableBase
public DataNodeMetrics getMetrics() {
return metrics;
}
public DataNodeDiskMetrics getDiskMetrics() {
return diskMetrics;
}
public DataNodePeerMetrics getPeerMetrics() {
return peerMetrics;
@@ -3520,4 +3524,14 @@ public class DataNode extends ReconfigurableBase
return peerMetrics != null ?
peerMetrics.dumpSendPacketDownstreamAvgInfoAsJson() : null;
}
@Override // DataNodeMXBean
public String getSlowDisks() {
if (diskMetrics == null) {
// Disk Stats not enabled
return null;
}
Set<String> slowDisks = diskMetrics.getDiskOutliersStats().keySet();
return JSON.toString(slowDisks);
}
}
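
For context (not part of the patch): once a DataNode is running, the new attribute can be read like any other DataNodeInfo MBean attribute. A minimal sketch, assuming a DataNode whose JVM was started with remote JMX enabled; the host, port, and class name here are illustrative:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class SlowDisksJmxProbe {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; requires the DataNode JVM to be started with
    // the com.sun.management.jmxremote.* options.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://datanode-host:9999/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbsc = connector.getMBeanServerConnection();
      ObjectName name =
          new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
      // Per the patch above: null when disk stats are disabled, otherwise
      // a JSON array of the disk paths flagged as outliers.
      String slowDisks = (String) mbsc.getAttribute(name, "SlowDisks");
      System.out.println("SlowDisks = " + slowDisks);
    }
  }
}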

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java

@@ -132,4 +132,11 @@ public interface DataNodeMXBean {
* </p>
*/
String getSendPacketDownstreamAvgInfo();
/**
* Gets the slow disks in the Datanode.
*
* @return list of slow disks
*/
String getSlowDisks();
}
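
A note on the return shape: getSlowDisks() serializes the keySet() of the outlier map as a JSON array of strings, so a DataNode with two flagged volumes would report something like the following (paths illustrative):

["/data/1/dfs/dn","/data/2/dfs/dn"]

An empty set serializes to [], and the attribute is null when disk profiling is disabled, as the DataNode implementation above shows.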

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java

@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.datanode.metrics;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -52,7 +54,8 @@ public class DataNodeDiskMetrics {
private volatile boolean shouldRun;
private OutlierDetector slowDiskDetector;
private Daemon slowDiskDetectionDaemon;
private volatile Map<String, Map<DiskOutlierDetectionOp, Double>> diskOutliersStats;
private volatile Map<String, Map<DiskOutlierDetectionOp, Double>>
diskOutliersStats = Maps.newHashMap();
public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs) {
this.dn = dn;
@@ -178,4 +181,12 @@ public class DataNodeDiskMetrics {
LOG.error("Disk Outlier Detection daemon did not shutdown", e);
}
}
/**
* Use only for testing.
*/
@VisibleForTesting
public void addSlowDiskForTesting(String slowDiskPath) {
diskOutliersStats.put(slowDiskPath, ImmutableMap.of());
}
}
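
Worth noting (an observation, not part of the diff): initializing diskOutliersStats to an empty map rather than leaving it null is what lets getSlowDisks() call getDiskOutliersStats().keySet() safely before the detection daemon's first pass, and gives addSlowDiskForTesting() a map to seed. A minimal sketch of that interaction, with the variable names assumed for illustration:

// Before the detection daemon has reported anything:
DataNodeDiskMetrics diskMetrics = datanode.getDiskMetrics();
Set<String> slow = diskMetrics.getDiskOutliersStats().keySet(); // empty set, no NPE
diskMetrics.addSlowDiskForTesting("/data/3/dfs/dn");            // seeds one fake outlier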

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;
@@ -104,8 +105,12 @@ public class TestDataNodeMXBean {
String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
"BPServiceActorInfo");
Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
} finally {
if (cluster != null) {cluster.shutdown();}
if (cluster != null) {
cluster.shutdown();
}
}
}
@@ -209,4 +214,30 @@
}
return totalBlocks;
}
@Test
public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
Configuration conf = new Configuration();
conf.setDouble(DFSConfigKeys
.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
List<DataNode> datanodes = cluster.getDataNodes();
Assert.assertEquals(1, datanodes.size());
DataNode datanode = datanodes.get(0);
String slowDiskPath = "test/data1/slowVolume";
datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath);
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=DataNode,name=DataNodeInfo");
String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
Assert.assertTrue(slowDisks.contains(slowDiskPath));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
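
Beyond the unit test, the attribute is also visible through the DataNode's HTTP /jmx servlet, whose qry parameter narrows the output to a single bean. A hedged sketch; the host and port are assumptions, so substitute the address from dfs.datanode.http.address:

import java.io.InputStream;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SlowDisksHttpProbe {
  public static void main(String[] args) throws Exception {
    // Illustrative endpoint; the servlet returns a JSON document in which
    // SlowDisks appears as one field of the DataNodeInfo bean.
    URL url = new URL("http://datanode-host:9864/jmx"
        + "?qry=Hadoop:service=DataNode,name=DataNodeInfo");
    try (InputStream in = url.openStream()) {
      System.out.println(
          new String(in.readAllBytes(), StandardCharsets.UTF_8));
    }
  }
}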