HDFS-14235. Handle ArrayIndexOutOfBoundsException in DataNodeDiskMetrics#slowDiskDetectionDaemon. Contributed by Ranith Sardar.
This commit is contained in:
parent 642fe6a260
commit 41e18feda3
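Read as a whole, the diff below makes four related changes to DataNodeDiskMetrics#slowDiskDetectionDaemon. The detection loop now checks that dn.getFSDataset() is non-null before touching it, and the duplicate volumeIterator.next() call that could run the volume iterator past its last element is replaced by reusing the already-fetched volume. The early return on empty stats becomes a continue so the daemon keeps polling, and a new overrideStatus flag keeps the daemon from overwriting outliers injected by addSlowDiskForTesting().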
@@ -57,6 +57,10 @@ public class DataNodeDiskMetrics {
   private volatile Map<String, Map<DiskOp, Double>>
       diskOutliersStats = Maps.newHashMap();
 
+  // Adding for test purpose. When addSlowDiskForTesting() called from test
+  // code, status should not be overridden by daemon thread.
+  private boolean overrideStatus = true;
+
   public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs) {
     this.dn = dn;
     this.detectionInterval = diskOutlierDetectionIntervalMs;
@@ -71,6 +75,7 @@ public class DataNodeDiskMetrics {
       @Override
       public void run() {
         while (shouldRun) {
+          if (dn.getFSDataset() != null) {
             Map<String, Double> metadataOpStats = Maps.newHashMap();
             Map<String, Double> readIoStats = Maps.newHashMap();
             Map<String, Double> writeIoStats = Maps.newHashMap();
@@ -81,7 +86,7 @@ public class DataNodeDiskMetrics {
                   .iterator();
               while (volumeIterator.hasNext()) {
                 FsVolumeSpi volume = volumeIterator.next();
-                DataNodeVolumeMetrics metrics = volumeIterator.next().getMetrics();
+                DataNodeVolumeMetrics metrics = volume.getMetrics();
                 String volumeName = volume.getBaseURI().getPath();
 
                 metadataOpStats.put(volumeName,
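The hunk above is the core of the fix named in the commit title: the removed line called volumeIterator.next() a second time in each pass, so every iteration consumed two volumes even though hasNext() had been checked only once, and the second call could step past the last volume. The replacement reuses the volume fetched at the top of the loop. Below is a minimal, self-contained sketch of the same mistake with plain java.util iterators and hypothetical volume names; a java.util list iterator reports the overrun as NoSuchElementException, while per the commit title the DataNode's volume iterator surfaced it as an ArrayIndexOutOfBoundsException.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class DoubleNextDemo {
  public static void main(String[] args) {
    List<String> volumes = Arrays.asList("/data/1", "/data/2", "/data/3");

    // Buggy pattern: hasNext() is checked once per pass, but next() is
    // called twice, so each pass consumes two elements and can overrun.
    Iterator<String> it = volumes.iterator();
    try {
      while (it.hasNext()) {
        String volume = it.next();
        String metrics = it.next() + "-metrics"; // second, unguarded next()
        System.out.println(volume + " -> " + metrics);
      }
    } catch (RuntimeException e) {
      System.out.println("Overran the iterator: " + e);
    }

    // Fixed pattern, matching the '+' line above: fetch each element once
    // and reuse it for both the name and the metrics lookup.
    for (String volume : volumes) {
      System.out.println(volume + " -> " + volume + "-metrics");
    }
  }
}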
@@ -98,14 +103,15 @@ public class DataNodeDiskMetrics {
                 }
               }
             }
-            if (metadataOpStats.isEmpty() && readIoStats.isEmpty() &&
-                writeIoStats.isEmpty()) {
+            if (metadataOpStats.isEmpty() && readIoStats.isEmpty()
+                && writeIoStats.isEmpty()) {
               LOG.debug("No disk stats available for detecting outliers.");
-              return;
+              continue;
             }
 
             detectAndUpdateDiskOutliers(metadataOpStats, readIoStats,
                 writeIoStats);
+          }
 
           try {
             Thread.sleep(detectionInterval);
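Two control-flow changes in the hunk above keep the daemon alive. The added closing brace pairs with the if (dn.getFSDataset() != null) guard opened earlier, so an uninitialized dataset is simply skipped, and replacing return with continue means a cycle with no disk stats is skipped rather than ending run() and silently terminating the detection thread. Here is a small self-contained sketch of that shape, with hypothetical names and intervals:

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

public class PollingLoopDemo {
  private static final long DETECTION_INTERVAL_MS = 100;
  // Stands in for dn.getFSDataset(): null until initialization finishes.
  private static final AtomicReference<Map<String, Double>> dataset =
      new AtomicReference<>();
  private static volatile boolean shouldRun = true;

  public static void main(String[] args) throws InterruptedException {
    Thread daemon = new Thread(() -> {
      while (shouldRun) {
        Map<String, Double> stats = dataset.get();
        if (stats != null) {               // guard: dataset may not exist yet
          if (stats.isEmpty()) {
            // 'continue' only skips this cycle; a 'return' here would end
            // run() and the thread with it, which is what the patch avoids.
            sleepQuietly();
            continue;
          }
          System.out.println("detect outliers over " + stats);
        }
        sleepQuietly();
      }
    });
    daemon.start();

    Thread.sleep(150);                                        // not initialized yet
    dataset.set(Collections.<String, Double>emptyMap());      // no stats yet
    Thread.sleep(150);
    dataset.set(Collections.singletonMap("/data/1", 42.0));   // real stats
    Thread.sleep(150);
    shouldRun = false;
    daemon.join();
  }

  private static void sleepQuietly() {
    try {
      Thread.sleep(DETECTION_INTERVAL_MS);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
  }
}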
@@ -143,10 +149,11 @@ public class DataNodeDiskMetrics {
     for (Map.Entry<String, Double> entry : writeIoOutliers.entrySet()) {
       addDiskStat(diskStats, entry.getKey(), DiskOp.WRITE, entry.getValue());
     }
 
+    if (overrideStatus) {
       diskOutliersStats = diskStats;
+      LOG.debug("Updated disk outliers.");
+    }
   }
 
   private void addDiskStat(Map<String, Map<DiskOp, Double>> diskStats,
       String disk, DiskOp diskOp, double latency) {
@@ -176,6 +183,7 @@ public class DataNodeDiskMetrics {
   @VisibleForTesting
   public void addSlowDiskForTesting(String slowDiskPath,
       Map<DiskOp, Double> latencies) {
+    overrideStatus = false;
     if (latencies == null) {
       diskOutliersStats.put(slowDiskPath, ImmutableMap.of());
     } else {
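The last hunk flips the new flag off inside addSlowDiskForTesting(), which is what the comment added in the first hunk promises: once a test injects a slow disk, a later detection pass no longer replaces diskOutliersStats (see the if (overrideStatus) guard above). A stand-alone sketch of that interaction, using a hypothetical class rather than the real DataNodeDiskMetrics:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class OverrideStatusDemo {
  private volatile Map<String, Double> diskOutliersStats = new HashMap<>();
  // Mirrors the new field: true until a test injects its own slow disk.
  private boolean overrideStatus = true;

  // What the daemon does at the end of a detection pass.
  void detectAndUpdate(Map<String, Double> freshStats) {
    if (overrideStatus) {
      diskOutliersStats = freshStats;
    }
  }

  // Mirrors addSlowDiskForTesting(): injected data must survive later passes.
  void addSlowDiskForTesting(String slowDiskPath, double latency) {
    overrideStatus = false;
    diskOutliersStats.put(slowDiskPath, latency);
  }

  public static void main(String[] args) {
    OverrideStatusDemo metrics = new OverrideStatusDemo();
    metrics.addSlowDiskForTesting("/data/slow", 123.0);
    // A later daemon pass no longer clobbers the injected entry.
    metrics.detectAndUpdate(Collections.singletonMap("/data/other", 1.0));
    System.out.println(metrics.diskOutliersStats);   // {/data/slow=123.0}
  }
}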